Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:

 - Move the nohz kick code out of the scheduler tick to a dedicated IPI,
   from Frederic Weisbecker.

   This necessitated quite some background infrastructure rework,
  including:

   * Clean up some irq-work internals
   * Implement remote irq-work
   * Implement nohz kick on top of remote irq-work
   * Move full dynticks timer enqueue notification to new kick
   * Move multi-task notification to new kick
   * Remove unnecessary barriers on multi-task notification

 - Remove proliferation of wait_on_bit() action functions and allow
   wait_on_bit_action() functions to support a timeout; a usage sketch
   follows this list.  (Neil Brown)

 - Another round of sched/numa improvements, cleanups and fixes.  (Rik
   van Riel)

 - Implement fast idling of CPUs when the system is partially loaded,
   for better scalability.  (Tim Chen)

 - Restructure and fix the CPU hotplug handling code that may leave
   cfs_rq and rt_rq's throttled when tasks are migrated away from a dead
   cpu.  (Kirill Tkhai)

 - Robustify the sched topology setup code.  (Peter Zijlstra)

 - Improve sched_feat() handling wrt.  static_keys (Jason Baron)

 - Misc fixes.
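
   A minimal usage sketch of the reworked wait-on-bit API (a sketch only,
   assuming the post-series declarations in <linux/wait.h>; my_flags and
   MY_FLAG_BUSY are illustrative names, not from the series itself):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static unsigned long my_flags;
	#define MY_FLAG_BUSY	0

	static int my_wait_for_idle(void)
	{
		/* Stock schedule()-based wait: the series removes the need
		 * to hand-roll an action function for this common case. */
		return wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_INTERRUPTIBLE);
	}

   Custom behaviour still goes through wait_on_bit_action(), and an action
   may now implement a timeout via the timeout field the series adds to
   struct wait_bit_key.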

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
  sched/fair: Fix 'make xmldocs' warning caused by missing description
  sched: Use macro for magic number of -1 for setparam
  sched: Robustify topology setup
  sched: Fix sched_setparam() policy == -1 logic
  sched: Allow wait_on_bit_action() functions to support a timeout
  sched: Remove proliferation of wait_on_bit() action functions
  sched/numa: Revert "Use effective_load() to balance NUMA loads"
  sched: Fix static_key race with sched_feat()
  sched: Remove extra static_key*() function indirection
  sched/rt: Fix replenish_dl_entity() comments to match the current upstream code
  sched: Transform resched_task() into resched_curr()
  sched/deadline: Kill task_struct->pi_top_task
  sched: Rework check_for_tasks()
  sched/rt: Enqueue just unthrottled rt_rq back on the stack in __disable_runtime()
  sched/fair: Disable runtime_enabled on dying rq
  sched/numa: Change scan period code to match intent
  sched/numa: Rework best node setting in task_numa_migrate()
  sched/numa: Examine a task move when examining a task swap
  sched/numa: Simplify task_numa_compare()
  sched/numa: Use effective_load() to balance NUMA loads
  ...
diff --git a/.mailmap b/.mailmap
index df1baba..1ad6873 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 John Stultz <johnstul@us.ibm.com>
+<josh@joshtriplett.org> <josh@freedesktop.org>
+<josh@joshtriplett.org> <josh@kernel.org>
+<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
+<josh@joshtriplett.org> <josht@us.ibm.com>
+<josh@joshtriplett.org> <josht@vnet.ibm.com>
 Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/CREDITS b/CREDITS
index 28ee151..a80b667 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@
 S: Australia
 
 N: Josh Triplett
-E: josh@freedesktop.org
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87  CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+E: josh@joshtriplett.org
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C  1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
 D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
 
 N: Winfried Trümper
 E: winni@xpilot.org
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 2f0fcb2..f29bcbc 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -2451,8 +2451,8 @@
 ,month="February"
 ,year="2010"
 ,note="Available:
-\url{http://kerneltrap.com/mailarchive/linux-netdev/2010/2/26/6270589}
-[Viewed March 20, 2011]"
+\url{http://thread.gmane.org/gmane.linux.network/153338}
+[Viewed June 9, 2014]"
 ,annotation={
 	Use a pair of list_head structures to support RCU-protected
 	resizable hash tables.
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 141d531..613033f 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -1,5 +1,14 @@
 Reference-count design for elements of lists/arrays protected by RCU.
 
+
+Please note that the percpu-ref feature is likely your first
+stop if you need to combine reference counts and RCU.  Please see
+include/linux/percpu-refcount.h for more information.  However, in
+those unusual cases where percpu-ref would consume too much memory,
+please read on.
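
As a quick illustration of the percpu-ref API mentioned above (a sketch
only; the percpu_ref_init() signature has varied across kernel versions,
and struct my_obj is an illustrative name):

	#include <linux/kernel.h>
	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		struct percpu_ref ref;
		/* ... payload ... */
	};

	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		kfree(obj);	/* runs once the last reference is dropped */
	}

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		if (percpu_ref_init(&obj->ref, my_obj_release)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	/* Hot paths then use cheap per-CPU percpu_ref_get()/percpu_ref_put()
	 * instead of a shared atomic; percpu_ref_kill() switches to atomic
	 * mode and drops the initial reference at teardown. */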
+
+------------------------------------------------------------------------
+
 Reference counting on elements of lists which are protected by traditional
 reader/writer spinlocks or semaphores is straightforward:
 
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 37fc4f6..85af34d 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -72,27 +72,54 @@
 
   u32 code0;			/* Executable code */
   u32 code1;			/* Executable code */
-  u64 text_offset;		/* Image load offset */
-  u64 res0	= 0;		/* reserved */
-  u64 res1	= 0;		/* reserved */
+  u64 text_offset;		/* Image load offset, little endian */
+  u64 image_size;		/* Effective Image size, little endian */
+  u64 flags;			/* kernel flags, little endian */
   u64 res2	= 0;		/* reserved */
   u64 res3	= 0;		/* reserved */
   u64 res4	= 0;		/* reserved */
   u32 magic	= 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
-  u32 res5 = 0;      		/* reserved */
+  u32 res5;      		/* reserved (used for PE COFF offset) */
 
 
 Header notes:
 
+- As of v3.17, all fields are little endian unless stated otherwise.
+
 - code0/code1 are responsible for branching to stext.
+
 - when booting through EFI, code0/code1 are initially skipped.
   res5 is an offset to the PE header and the PE header has the EFI
-  entry point (efi_stub_entry). When the stub has done its work, it
+  entry point (efi_stub_entry).  When the stub has done its work, it
   jumps to code0 to resume the normal boot process.
 
-The image must be placed at the specified offset (currently 0x80000)
-from the start of the system RAM and called there. The start of the
-system RAM must be aligned to 2MB.
+- Prior to v3.17, the endianness of text_offset was not specified.  In
+  these cases image_size is zero and text_offset is 0x80000 in the
+  endianness of the kernel.  Where image_size is non-zero, image_size is
+  little-endian and must be respected.  Where image_size is zero,
+  text_offset can be assumed to be 0x80000.
+
+- The flags field (introduced in v3.17) is a little-endian 64-bit field
+  composed as follows:
+  Bit 0: 	Kernel endianness.  1 if BE, 0 if LE.
+  Bits 1-63:	Reserved.
+
+- When image_size is zero, a bootloader should attempt to keep as much
+  memory as possible free for use by the kernel immediately after the
+  end of the kernel image. The amount of space required will vary
+  depending on selected features, and is effectively unbounded.
+
+The Image must be placed text_offset bytes from a 2MB aligned base
+address near the start of usable system RAM and called there. Memory
+below that base address is currently unusable by Linux, and therefore it
+is strongly recommended that this location is the start of system RAM.
+At least image_size bytes from the start of the image must be free for
+use by the kernel.
+
+Any memory described to the kernel (even that below the 2MB aligned base
+address) which is not marked as reserved from the kernel (e.g. with a
+memreserve region in the device tree) will be considered as available to
+the kernel.
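
The notes above can be summarised as a loader-side sketch (an
illustration only, assuming a C99 bootloader environment with a
glibc-style <endian.h>; the struct and helper names are ours, not part
of any kernel ABI definition):

	#include <stdint.h>
	#include <endian.h>

	struct arm64_image_header {
		uint32_t code0, code1;	/* executable code */
		uint64_t text_offset;	/* little endian as of v3.17 */
		uint64_t image_size;	/* little endian; zero on pre-v3.17 images */
		uint64_t flags;		/* bit 0: 1 = BE kernel, 0 = LE */
		uint64_t res2, res3, res4;
		uint32_t magic;		/* 0x644d5241, "ARM\x64", little endian */
		uint32_t res5;		/* PE header offset when EFI-bootable */
	};

	static uint64_t effective_text_offset(const struct arm64_image_header *h)
	{
		/* Pre-v3.17 images carry image_size == 0 and a text_offset of
		 * unspecified endianness; the documented fallback is 0x80000. */
		if (le64toh(h->image_size) == 0)
			return 0x80000;
		return le64toh(h->text_offset);
	}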
 
 Before jumping into the kernel, the following conditions must be met:
 
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index d50fa61..344e85c 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -2,18 +2,18 @@
 		     ==============================
 
 Author: Catalin Marinas <catalin.marinas@arm.com>
-Date  : 20 February 2012
 
 This document describes the virtual memory layout used by the AArch64
 Linux kernel. The architecture allows up to 4 levels of translation
 tables with a 4KB page size and up to 3 levels with a 64KB page size.
 
-AArch64 Linux uses 3 levels of translation tables with the 4KB page
-configuration, allowing 39-bit (512GB) virtual addresses for both user
-and kernel. With 64KB pages, only 2 levels of translation tables are
-used but the memory layout is the same.
+AArch64 Linux uses either 3 levels or 4 levels of translation tables
+with the 4KB page configuration, allowing 39-bit (512GB) or 48-bit
+(256TB) virtual addresses, respectively, for both user and kernel. With
+64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
+virtual addresses, are used, but the memory layout is the same.
 
-User addresses have bits 63:39 set to 0 while the kernel addresses have
+User addresses have bits 63:48 set to 0 while the kernel addresses have
 the same bits set to 1. TTBRx selection is given by bit 63 of the
 virtual address. The swapper_pg_dir contains only kernel (global)
 mappings while the user pgd contains only user (non-global) mappings.
@@ -21,58 +21,40 @@
 TTBR0.
 
 
-AArch64 Linux memory layout with 4KB pages:
+AArch64 Linux memory layout with 4KB pages + 3 levels:
 
 Start			End			Size		Use
 -----------------------------------------------------------------------
 0000000000000000	0000007fffffffff	 512GB		user
-
-ffffff8000000000	ffffffbbfffeffff	~240GB		vmalloc
-
-ffffffbbffff0000	ffffffbbffffffff	  64KB		[guard page]
-
-ffffffbc00000000	ffffffbdffffffff	   8GB		vmemmap
-
-ffffffbe00000000	ffffffbffbbfffff	  ~8GB		[guard, future vmmemap]
-
-ffffffbffa000000	ffffffbffaffffff	  16MB		PCI I/O space
-
-ffffffbffb000000	ffffffbffbbfffff	  12MB		[guard]
-
-ffffffbffbc00000	ffffffbffbdfffff	   2MB		fixed mappings
-
-ffffffbffbe00000	ffffffbffbffffff	   2MB		[guard]
-
-ffffffbffc000000	ffffffbfffffffff	  64MB		modules
-
-ffffffc000000000	ffffffffffffffff	 256GB		kernel logical memory map
+ffffff8000000000	ffffffffffffffff	 512GB		kernel
 
 
-AArch64 Linux memory layout with 64KB pages:
+AArch64 Linux memory layout with 4KB pages + 4 levels:
+
+Start			End			Size		Use
+-----------------------------------------------------------------------
+0000000000000000	0000ffffffffffff	 256TB		user
+ffff000000000000	ffffffffffffffff	 256TB		kernel
+
+
+AArch64 Linux memory layout with 64KB pages + 2 levels:
 
 Start			End			Size		Use
 -----------------------------------------------------------------------
 0000000000000000	000003ffffffffff	   4TB		user
+fffffc0000000000	ffffffffffffffff	   4TB		kernel
 
-fffffc0000000000	fffffdfbfffeffff	  ~2TB		vmalloc
 
-fffffdfbffff0000	fffffdfbffffffff	  64KB		[guard page]
+AArch64 Linux memory layout with 64KB pages + 3 levels:
 
-fffffdfc00000000	fffffdfdffffffff	   8GB		vmemmap
+Start			End			Size		Use
+-----------------------------------------------------------------------
+0000000000000000	0000ffffffffffff	 256TB		user
+ffff000000000000	ffffffffffffffff	 256TB		kernel
 
-fffffdfe00000000	fffffdfffbbfffff	  ~8GB		[guard, future vmmemap]
 
-fffffdfffa000000	fffffdfffaffffff	  16MB		PCI I/O space
-
-fffffdfffb000000	fffffdfffbbfffff	  12MB		[guard]
-
-fffffdfffbc00000	fffffdfffbdfffff	   2MB		fixed mappings
-
-fffffdfffbe00000	fffffdfffbffffff	   2MB		[guard]
-
-fffffdfffc000000	fffffdffffffffff	  64MB		modules
-
-fffffe0000000000	ffffffffffffffff	   2TB		kernel logical memory map
+For details of the virtual kernel memory layout please see the kernel
+booting log.
 
 
 Translation table lookup with 4KB pages:
@@ -86,7 +68,7 @@
  |                 |         |         |         +-> [20:12] L3 index
  |                 |         |         +-----------> [29:21] L2 index
  |                 |         +---------------------> [38:30] L1 index
- |                 +-------------------------------> [47:39] L0 index (not used)
+ |                 +-------------------------------> [47:39] L0 index
  +-------------------------------------------------> [63] TTBR0/1
 
 
@@ -99,10 +81,11 @@
  |                 |    |               |              v
  |                 |    |               |            [15:0]  in-page offset
  |                 |    |               +----------> [28:16] L3 index
- |                 |    +--------------------------> [41:29] L2 index (only 38:29 used)
- |                 +-------------------------------> [47:42] L1 index (not used)
+ |                 |    +--------------------------> [41:29] L2 index
+ |                 +-------------------------------> [47:42] L1 index
  +-------------------------------------------------> [63] TTBR0/1
 
+
 When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
 offset from the kernel VA (top 24bits of the kernel VA set to zero):
 
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 821de56..10c949b 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -599,6 +599,20 @@
 while the caller holds cgroup_mutex and it is ensured that either
 attach() or cancel_attach() will be called in future.
 
+void css_reset(struct cgroup_subsys_state *css)
+(cgroup_mutex held by caller)
+
+An optional operation which should restore @css's configuration to the
+initial state.  This is currently only used on the unified hierarchy
+when a subsystem is disabled on a cgroup through
+"cgroup.subtree_control" but should remain enabled because other
+subsystems depend on it.  cgroup core makes such a css invisible by
+removing the associated interface files and invokes this callback so
+that the hidden subsystem can return to the initial neutral state.
+This prevents unexpected resource control from a hidden css and
+ensures that the configuration is in the initial state when it is made
+visible again later.
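
As an illustration of this callback (a sketch only; the mycg controller
and its limit knob are hypothetical):

	#include <linux/cgroup.h>
	#include <linux/kernel.h>

	struct mycg {
		struct cgroup_subsys_state css;
		unsigned long limit;	/* hypothetical resource knob */
	};

	static void mycg_css_reset(struct cgroup_subsys_state *css)
	{
		struct mycg *cg = container_of(css, struct mycg, css);

		/* back to the neutral default: a hidden css must exert no
		 * resource control */
		cg->limit = ULONG_MAX;
	}

	struct cgroup_subsys mycg_cgrp_subsys = {
		.css_reset	= mycg_css_reset,
		/* ... css_alloc, css_free, attach, etc. ... */
	};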
+
 void cancel_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 324b182..4f45632 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -94,12 +94,35 @@
 
  mount -t cgroup -o __DEVEL__sane_behavior cgroup $MOUNT_POINT
 
-All controllers which are not bound to other hierarchies are
-automatically bound to unified hierarchy and show up at the root of
-it.  Controllers which are enabled only in the root of unified
-hierarchy can be bound to other hierarchies at any time.  This allows
-mixing unified hierarchy with the traditional multiple hierarchies in
-a fully backward compatible way.
+All controllers which support the unified hierarchy and are not bound
+to other hierarchies are automatically bound to unified hierarchy and
+show up at the root of it.  Controllers which are enabled only in the
+root of unified hierarchy can be bound to other hierarchies.  This
+allows mixing unified hierarchy with the traditional multiple
+hierarchies in a fully backward compatible way.
+
+For development purposes, the following boot parameter makes all
+controllers appear on the unified hierarchy whether supported or
+not.
+
+ cgroup__DEVEL__legacy_files_on_dfl
+
+A controller can be moved across hierarchies only after the controller
+is no longer referenced in its current hierarchy.  Because per-cgroup
+controller states are destroyed asynchronously and controllers may
+have lingering references, a controller may not show up immediately on
+the unified hierarchy after the final umount of the previous
+hierarchy.  Similarly, a controller should be fully disabled to be
+moved out of the unified hierarchy and it may take some time for the
+disabled controller to become available for other hierarchies;
+furthermore, due to dependencies among controllers, other controllers
+may need to be disabled too.
+
+While useful for development and manual configurations, dynamically
+moving controllers between the unified and other hierarchies is
+strongly discouraged for production use.  It is recommended to decide
+the hierarchies and controller associations before starting to use the
+controllers.
 
 
 2-2. cgroup.subtree_control
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
index 2a4ab04..f9865e7 100644
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
@@ -12,8 +12,38 @@
 
  - reg : offset and length of the register set.
 
+ - #clock-cells : must be <1>, since PMU requires one cell as a clock specifier.
+		The single specifier cell is used as index to list of clocks
+		provided by PMU, which is currently:
+			0 : SoC clock output (CLKOUT pin)
+
+ - clock-names : list of clock names for particular CLKOUT mux inputs in
+		following format:
+			"clkoutN", where N is a decimal number corresponding to
+			CLKOUT mux control bits value for given input, e.g.
+				"clkout0", "clkout7", "clkout15".
+
+ - clocks : list of phandles and specifiers to all input clocks listed in
+		clock-names property.
+
 Example :
 pmu_system_controller: system-controller@10040000 {
 	compatible = "samsung,exynos5250-pmu", "syscon";
 	reg = <0x10040000 0x5000>;
+	#clock-cells = <1>;
+	clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+			"clkout4", "clkout8", "clkout9";
+	clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+		<&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+		<&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+		<&clock CLK_XUSBXTI>;
+};
+
+Example of clock consumer :
+
+usb3503: usb3503@08 {
+	/* ... */
+	clock-names = "refclk";
+	clocks = <&pmu_system_controller 0>;
+	/* ... */
 };
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index c96d8dc..4ab09f2 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -3,28 +3,43 @@
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
+It is possible, but not required, to represent each port as a sub-node.
+This allows each port to be enabled independently when dealing with
+multiple PHYs.
+
 Required properties:
 - compatible        : compatible string, one of:
   - "allwinner,sun4i-a10-ahci"
-  - "fsl,imx53-ahci"
-  - "fsl,imx6q-ahci"
   - "hisilicon,hisi-ahci"
   - "ibm,476gtr-ahci"
   - "marvell,armada-380-ahci"
   - "snps,dwc-ahci"
   - "snps,exynos5440-ahci"
   - "snps,spear-ahci"
+  - "generic-ahci"
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
+Please note that when using "generic-ahci" you must also specify a SoC-specific
+compatible:
+	compatible = "manufacturer,soc-model-ahci", "generic-ahci";
+
 Optional properties:
 - dma-coherent      : Present if dma operations are coherent
 - clocks            : a list of phandle + clock specifier pairs
 - target-supply     : regulator for SATA target power
+- phys              : reference to the SATA PHY node
+- phy-names         : must be "sata-phy"
 
-"fsl,imx53-ahci", "fsl,imx6q-ahci" required properties:
-- clocks            : must contain the sata, sata_ref and ahb clocks
-- clock-names       : must contain "ahb" for the ahb clock
+Required properties when using sub-nodes:
+- #address-cells    : number of cells to encode an address
+- #size-cells       : number of cells representing the size of an address
+
+
+Sub-nodes required properties:
+- reg               : the port number
+- phys              : reference to the SATA PHY node
+
 
 Examples:
         sata@ffe08000 {
@@ -40,3 +55,23 @@
 		clocks = <&pll6 0>, <&ahb_gates 25>;
 		target-supply = <&reg_ahci_5v>;
 	};
+
+With sub-nodes:
+	sata@f7e90000 {
+		compatible = "marvell,berlin2q-ahci", "generic-ahci";
+		reg = <0xe90000 0x1000>;
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&chip CLKID_SATA>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		sata0: sata-port@0 {
+			reg = <0>;
+			phys = <&sata_phy 0>;
+		};
+
+		sata1: sata-port@1 {
+			reg = <1>;
+			phys = <&sata_phy 1>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/ata/ahci-st.txt b/Documentation/devicetree/bindings/ata/ahci-st.txt
new file mode 100644
index 0000000..0574a77
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/ahci-st.txt
@@ -0,0 +1,31 @@
+STMicroelectronics STi SATA controller
+
+This binding describes a SATA device.
+
+Required properties:
+ - compatible	   : Must be "st,sti-ahci"
+ - reg		   : Physical base addresses and length of register sets
+ - interrupts	   : Interrupt associated with the SATA device
+ - interrupt-names :   Associated name must be: "hostc"
+ - resets	   : The power-down and soft-reset lines of SATA IP
+ - reset-names	   :   Associated names must be: "pwr-dwn" and "sw-rst"
+ - clocks	   : The phandle for the clock
+ - clock-names	   :   Associated name must be: "ahci_clk"
+ - phys		   : The phandle for the PHY device
+ - phy-names	   :   Associated name must be: "ahci_phy"
+
+Example:
+
+	sata0: sata@fe380000 {
+		compatible      = "st,sti-ahci";
+		reg             = <0xfe380000 0x1000>;
+		interrupts      = <GIC_SPI 157 IRQ_TYPE_NONE>;
+		interrupt-names = "hostc";
+		phys	        = <&miphy365x_phy MIPHY_PORT_0 MIPHY_TYPE_SATA>;
+		phy-names       = "ahci_phy";
+		resets	        = <&powerdown STIH416_SATA0_POWERDOWN>,
+				  <&softreset STIH416_SATA0_SOFTRESET>;
+		reset-names     = "pwr-dwn", "sw-rst";
+		clocks	        = <&clk_s_a0_ls CLK_ICN_REG>;
+		clock-names     = "ahci_clk";
+	};
diff --git a/Documentation/devicetree/bindings/ata/imx-sata.txt b/Documentation/devicetree/bindings/ata/imx-sata.txt
new file mode 100644
index 0000000..fa511db
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/imx-sata.txt
@@ -0,0 +1,36 @@
+* Freescale i.MX AHCI SATA Controller
+
+The Freescale i.MX SATA controller mostly conforms to the AHCI interface
+with some special extensions at integration level.
+
+Required properties:
+- compatible : should be one of the following:
+   - "fsl,imx53-ahci" for i.MX53 SATA controller
+   - "fsl,imx6q-ahci" for i.MX6Q SATA controller
+- interrupts : interrupt mapping for SATA IRQ
+- reg : registers mapping
+- clocks : list of clock specifiers, must contain an entry for each
+  required entry in clock-names
+- clock-names : should include "sata", "sata_ref" and "ahb" entries
+
+Optional properties:
+- fsl,transmit-level-mV : transmit voltage level, in millivolts.
+- fsl,transmit-boost-mdB : transmit boost level, in milli-decibels
+- fsl,transmit-atten-16ths : transmit attenuation, in 16ths
+- fsl,receive-eq-mdB : receive equalisation, in milli-decibels
+  Please refer to the technical documentation or the driver source code
+  for the list of legal values for these options.
+- fsl,no-spread-spectrum : disable spread-spectrum clocking on the SATA
+  link.
+
+Examples:
+
+sata@02200000 {
+	compatible = "fsl,imx6q-ahci";
+	reg = <0x02200000 0x4000>;
+	interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&clks IMX6QDL_CLK_SATA>,
+		 <&clks IMX6QDL_CLK_SATA_REF_100M>,
+		 <&clks IMX6QDL_CLK_AHB>;
+	clock-names = "sata", "sata_ref", "ahb";
+};
diff --git a/Documentation/devicetree/bindings/ata/tegra-sata.txt b/Documentation/devicetree/bindings/ata/tegra-sata.txt
new file mode 100644
index 0000000..946f207
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/tegra-sata.txt
@@ -0,0 +1,30 @@
+Tegra124 SoC SATA AHCI controller
+
+Required properties :
+- compatible : "nvidia,tegra124-ahci".
+- reg : Should contain 2 entries:
+  - AHCI register set (SATA BAR5)
+  - SATA register set
+- interrupts : Defines the interrupt used by SATA
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+  - sata
+  - sata-oob
+  - cml1
+  - pll_e
+- resets : Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names : Must include the following entries:
+  - sata
+  - sata-oob
+  - sata-cold
+- phys : Must contain an entry for each entry in phy-names.
+  See ../phy/phy-bindings.txt for details.
+- phy-names : Must include the following entries:
+  - sata-phy : XUSB PADCTL SATA PHY
+- hvdd-supply : Defines the SATA HVDD regulator
+- vddio-supply : Defines the SATA VDDIO regulator
+- avdd-supply : Defines the SATA AVDD regulator
+- target-5v-supply : Defines the SATA 5V power regulator
+- target-12v-supply : Defines the SATA 12V power regulator
diff --git a/Documentation/devicetree/bindings/clock/clk-palmas-clk32kg-clocks.txt b/Documentation/devicetree/bindings/clock/clk-palmas-clk32kg-clocks.txt
new file mode 100644
index 0000000..4208886
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/clk-palmas-clk32kg-clocks.txt
@@ -0,0 +1,35 @@
+* Palmas 32KHz clocks *
+
+The Palmas device has two 32KHz clock output pins, KG and KG_AUDIO.
+
+This binding uses the common clock binding ./clock-bindings.txt.
+
+Required properties:
+- compatible :	"ti,palmas-clk32kg" for clk32kg clock
+		"ti,palmas-clk32kgaudio" for clk32kgaudio clock
+- #clock-cells : shall be set to 0.
+
+Optional property:
+- ti,external-sleep-control: The external enable input pins control the
+	enable/disable of clocks.  The external enable input pins are ENABLE1,
+	ENABLE2 and NSLEEP.  The valid values for the external pins are:
+		PALMAS_EXT_CONTROL_PIN_ENABLE1 for ENABLE1 pin
+		PALMAS_EXT_CONTROL_PIN_ENABLE2 for ENABLE2 pin
+		PALMAS_EXT_CONTROL_PIN_NSLEEP for NSLEEP pin
+	Specifying 0, or omitting this property, means the clock is
+	enabled/disabled via register access and these pins do not have any
+	control.  The macros for the external control pins for DTS are
+	defined in
+	dt-bindings/mfd/palmas.h
+
+Example:
+	#include <dt-bindings/mfd/palmas.h>
+	...
+	palmas: tps65913@58 {
+		...
+		clk32kg: palmas_clk32k@0 {
+			compatible = "ti,palmas-clk32kg";
+			#clock-cells = <0>;
+			ti,external-sleep-control = <PALMAS_EXT_CONTROL_PIN_NSLEEP>;
+		};
+		...
+	};
diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt
index f157878..06fc6d5 100644
--- a/Documentation/devicetree/bindings/clock/clock-bindings.txt
+++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -131,3 +131,39 @@
   ("pll" and "pll-switched").
 * The UART has its baud clock connected to the external oscillator and its
   register clock connected to the PLL clock (the "pll-switched" signal)
+
+==Assigned clock parents and rates==
+
+Some platforms may require initial configuration of default parent clocks
+and clock frequencies. Such a configuration can be specified in a device tree
+node through assigned-clocks, assigned-clock-parents and assigned-clock-rates
+properties. The assigned-clock-parents property should contain a list of parent
+clocks in form of phandle and clock specifier pairs, the assigned-clock-parents
+property the list of assigned clock frequency values - corresponding to clocks
+listed in the assigned-clocks property.
+
+To skip setting the parent or rate of a clock, its corresponding entry
+should be set to 0, or it can be omitted if it is not followed by any
+non-zero entry.
+
+    uart@a000 {
+        compatible = "fsl,imx-uart";
+        reg = <0xa000 0x1000>;
+        ...
+        clocks = <&osc 0>, <&pll 1>;
+        clock-names = "baud", "register";
+
+        assigned-clocks = <&clkcon 0>, <&pll 2>;
+        assigned-clock-parents = <&pll 2>;
+        assigned-clock-rates = <0>, <460800>;
+    };
+
+In this example the <&pll 2> clock is set as parent of clock <&clkcon 0> and
+the <&pll 2> clock is assigned a frequency value of 460800 Hz.
+
+Configuring a clock's parent and rate through the device node that consumes
+the clock can be done only for clocks that have a single user. Specifying
+conflicting parent or rate configuration in multiple consumer nodes for
+a shared clock is forbidden.
+
+Configuration of common clocks, which affect multiple consumer devices, can
+be similarly specified in the clock provider node.
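
For comparison, the imperative equivalent from consumer driver code,
which the declarative properties above replace (a sketch; the clock
names here are illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int my_clk_setup(struct device *dev)
	{
		struct clk *clk, *parent;

		clk = devm_clk_get(dev, "baud");	/* illustrative name */
		parent = devm_clk_get(dev, "pll");	/* illustrative name */
		if (IS_ERR(clk) || IS_ERR(parent))
			return -ENODEV;

		clk_set_parent(clk, parent);	  /* cf. assigned-clock-parents */
		return clk_set_rate(clk, 460800); /* cf. assigned-clock-rates */
	}
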
diff --git a/Documentation/devicetree/bindings/clock/clps711x-clock.txt b/Documentation/devicetree/bindings/clock/clps711x-clock.txt
new file mode 100644
index 0000000..ce5a747
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/clps711x-clock.txt
@@ -0,0 +1,19 @@
+* Clock bindings for the Cirrus Logic CLPS711X CPUs
+
+Required properties:
+- compatible       : Shall contain "cirrus,clps711x-clk".
+- reg              : Address of the internal register set.
+- startup-frequency: Factory-set CPU startup frequency in Hz.
+- #clock-cells     : Should be <1>.
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. See include/dt-bindings/clock/clps711x-clock.h
+for the full list of CLPS711X clock IDs.
+
+Example:
+	clks: clks@80000000 {
+		#clock-cells = <1>;
+		compatible = "cirrus,ep7312-clk", "cirrus,clps711x-clk";
+		reg = <0x80000000 0xc000>;
+		startup-frequency = <73728000>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 9cfcb4f..aba3d25 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -5,6 +5,8 @@
 - compatible : shall contain only one of the following:
 
 			"qcom,gcc-apq8064"
+			"qcom,gcc-apq8084"
+			"qcom,gcc-ipq8064"
 			"qcom,gcc-msm8660"
 			"qcom,gcc-msm8960"
 			"qcom,gcc-msm8974"
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt b/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
index d572e99..29ebf84 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
@@ -4,6 +4,8 @@
 Required properties :
 - compatible : shall contain only one of the following:
 
+			"qcom,mmcc-apq8064"
+			"qcom,mmcc-apq8084"
 			"qcom,mmcc-msm8660"
 			"qcom,mmcc-msm8960"
 			"qcom,mmcc-msm8974"
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt
new file mode 100644
index 0000000..0c2bf5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt
@@ -0,0 +1,61 @@
+* Rockchip RK3188/RK3066 Clock and Reset Unit
+
+The RK3188/RK3066 clock controller generates and supplies clocks to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk3188-cru", "rockchip,rk3188a-cru" or
+			"rockchip,rk3066a-cru"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+  If missing, pll rates are not changeable due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk3188-cru.h and
+dt-bindings/clock/rk3066-cru.h headers and can be used in device tree sources.
+Similar macros exist for the reset sources in these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with the following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "xin27m" - 27MHz crystal input on rk3066 - optional,
+ - "ext_hsadc" - external HSADC clock - optional,
+ - "ext_cif0" - external camera clock - optional,
+ - "ext_rmii" - external RMII clock - optional,
+ - "ext_jtag" - external JTAG clock - optional
+
+Example: Clock controller node:
+
+	cru: cru@20000000 {
+		compatible = "rockchip,rk3188-cru";
+		reg = <0x20000000 0x1000>;
+		rockchip,grf = <&grf>;
+
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller:
+
+	uart0: serial@10124000 {
+		compatible = "snps,dw-apb-uart";
+		reg = <0x10124000 0x400>;
+		interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <1>;
+		clocks = <&cru SCLK_UART0>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt
new file mode 100644
index 0000000..c9fbb76
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt
@@ -0,0 +1,61 @@
+* Rockchip RK3288 Clock and Reset Unit
+
+The RK3288 clock controller generates and supplies clocks to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk3288-cru"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+  If missing, pll rates are not changeable due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk3288-cru.h header and can be
+used in device tree sources. Similar macros exist for the reset sources in
+this file.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with the following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "ext_i2s" - external I2S clock - optional,
+ - "ext_hsadc" - external HSADC clock - optional,
+ - "ext_edp_24m" - external display port clock - optional,
+ - "ext_vip" - external VIP clock - optional,
+ - "ext_isp" - external ISP clock - optional,
+ - "ext_jtag" - external JTAG clock - optional
+
+Example: Clock controller node:
+
+	cru: cru@20000000 {
+		compatible = "rockchip,rk3288-cru";
+		reg = <0x20000000 0x1000>;
+		rockchip,grf = <&grf>;
+
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller:
+
+	uart0: serial@10124000 {
+		compatible = "snps,dw-apb-uart";
+		reg = <0x10124000 0x400>;
+		interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <1>;
+		clocks = <&cru SCLK_UART0>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/rockchip.txt b/Documentation/devicetree/bindings/clock/rockchip.txt
index a891c82..22f6769 100644
--- a/Documentation/devicetree/bindings/clock/rockchip.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip.txt
@@ -6,6 +6,9 @@
 
 == Gate clocks ==
 
+These bindings are deprecated!
+Please use the SoC-specific CRU bindings instead.
+
 The gate registers form a continuous block which makes the dt node
 structure a matter of taste, as either all gates can be put into
 one gate clock spanning all registers or they can be divided into
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
index ae56315..6247652 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
@@ -24,26 +24,26 @@
 
 Example:
 
-	clockgenA@fd345000 {
+	clockgen-a@fd345000 {
 		reg = <0xfd345000 0xb50>;
 
-		CLK_M_A1_DIV1: CLK_M_A1_DIV1 {
+		clk_m_a1_div1: clk-m-a1-div1 {
 			#clock-cells = <1>;
 			compatible = "st,clkgena-divmux-c32-odf1",
 				     "st,clkgena-divmux";
 
-			clocks = <&CLK_M_A1_OSC_PREDIV>,
-				 <&CLK_M_A1_PLL0 1>, /* PLL0 PHI1 */
-				 <&CLK_M_A1_PLL1 1>; /* PLL1 PHI1 */
+			clocks = <&clk_m_a1_osc_prediv>,
+				 <&clk_m_a1_pll0 1>, /* PLL0 PHI1 */
+				 <&clk_m_a1_pll1 1>; /* PLL1 PHI1 */
 
-			clock-output-names = "CLK_M_RX_ICN_TS",
-					     "CLK_M_RX_ICN_VDP_0",
-					     "", /* Unused */
-					     "CLK_M_PRV_T1_BUS",
-					     "CLK_M_ICN_REG_12",
-					     "CLK_M_ICN_REG_10",
-					     "", /* Unused */
-					     "CLK_M_ICN_ST231";
+			clock-output-names = "clk-m-rx-icn-ts",
+					     "clk-m-rx-icn-vdp-0",
+					     "", /* unused */
+					     "clk-m-prv-t1-bus",
+					     "clk-m-icn-reg-12",
+					     "clk-m-icn-reg-10",
+					     "", /* unused */
+					     "clk-m-icn-st231";
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
index 943e080..f1fa91c 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
@@ -17,7 +17,7 @@
 	"st,stih416-clkgenf-vcc-sd",	"st,clkgen-mux"
 	"st,stih415-clkgen-a9-mux",	"st,clkgen-mux"
 	"st,stih416-clkgen-a9-mux",	"st,clkgen-mux"
-
+	"st,stih407-clkgen-a9-mux",	"st,clkgen-mux"
 
 - #clock-cells : from common clock binding; shall be set to 0.
 
@@ -27,10 +27,10 @@
 
 Example:
 
-	CLK_M_HVA: CLK_M_HVA {
+	clk_m_hva: clk-m-hva@fd690868 {
 		#clock-cells = <0>;
 		compatible = "st,stih416-clkgenf-vcc-hva", "st,clkgen-mux";
 		reg = <0xfd690868 4>;
 
-		clocks = <&CLOCKGEN_F 1>, <&CLK_M_A1_DIV0 3>;
+		clocks = <&clockgen_f 1>, <&clk_m_a1_div0 3>;
 	};
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
index 81eb385..efb51cf 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
@@ -19,11 +19,14 @@
 	"st,stih415-plls-c32-ddr",	"st,clkgen-plls-c32"
 	"st,stih416-plls-c32-a9",	"st,clkgen-plls-c32"
 	"st,stih416-plls-c32-ddr",	"st,clkgen-plls-c32"
+	"st,stih407-plls-c32-a0",	"st,clkgen-plls-c32"
+	"st,stih407-plls-c32-a9",	"st,clkgen-plls-c32"
+	"st,stih407-plls-c32-c0_0",	"st,clkgen-plls-c32"
+	"st,stih407-plls-c32-c0_1",	"st,clkgen-plls-c32"
 
 	"st,stih415-gpu-pll-c32",	"st,clkgengpu-pll-c32"
 	"st,stih416-gpu-pll-c32",	"st,clkgengpu-pll-c32"
 
-
 - #clock-cells : From common clock binding; shall be set to 1.
 
 - clocks : From common clock binding
@@ -32,17 +35,17 @@
 
 Example:
 
-	clockgenA@fee62000 {
+	clockgen-a@fee62000 {
 		reg = <0xfee62000 0xb48>;
 
-		CLK_S_A0_PLL: CLK_S_A0_PLL {
+		clk_s_a0_pll: clk-s-a0-pll {
 			#clock-cells = <1>;
 			compatible = "st,clkgena-plls-c65";
 
-			clocks = <&CLK_SYSIN>;
+			clocks = <&clk_sysin>;
 
-			clock-output-names = "CLK_S_A0_PLL0_HS",
-					     "CLK_S_A0_PLL0_LS",
-					     "CLK_S_A0_PLL1";
+			clock-output-names = "clk-s-a0-pll0-hs",
+					     "clk-s-a0-pll0-ls",
+					     "clk-s-a0-pll1";
 		};
 	};
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
index 566c9d7..604766c 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
@@ -20,17 +20,17 @@
 
 Example:
 
-	clockgenA@fd345000 {
+	clockgen-a@fd345000 {
 		reg = <0xfd345000 0xb50>;
 
-		CLK_M_A2_OSC_PREDIV: CLK_M_A2_OSC_PREDIV {
+		clk_m_a2_osc_prediv: clk-m-a2-osc-prediv {
 			#clock-cells = <0>;
 			compatible = "st,clkgena-prediv-c32",
 				     "st,clkgena-prediv";
 
-			clocks = <&CLK_SYSIN>;
+			clocks = <&clk_sysin>;
 
-			clock-output-names = "CLK_M_A2_OSC_PREDIV";
+			clock-output-names = "clk-m-a2-osc-prediv";
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
index 4e3ff28..109b3ed 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
@@ -32,22 +32,30 @@
 
 Example:
 
-	CLOCKGEN_C_VCC: CLOCKGEN_C_VCC {
+	clockgen_c_vcc: clockgen-c-vcc@0xfe8308ac {
 		#clock-cells = <1>;
 		compatible = "st,stih416-clkgenc", "st,clkgen-vcc";
 		reg = <0xfe8308ac 12>;
 
-		clocks = <&CLK_S_VCC_HD>, <&CLOCKGEN_C 1>,
-			<&CLK_S_TMDS_FROMPHY>, <&CLOCKGEN_C 2>;
+		clocks = <&clk_s_vcc_hd>,
+			 <&clockgen_c 1>,
+			 <&clk_s_tmds_fromphy>,
+			 <&clockgen_c 2>;
 
-		clock-output-names  =
-			"CLK_S_PIX_HDMI",  "CLK_S_PIX_DVO",
-			"CLK_S_OUT_DVO",   "CLK_S_PIX_HD",
-			"CLK_S_HDDAC",     "CLK_S_DENC",
-			"CLK_S_SDDAC",     "CLK_S_PIX_MAIN",
-			"CLK_S_PIX_AUX",   "CLK_S_STFE_FRC_0",
-			"CLK_S_REF_MCRU",  "CLK_S_SLAVE_MCRU",
-			"CLK_S_TMDS_HDMI", "CLK_S_HDMI_REJECT_PLL",
-			"CLK_S_THSENS";
+		clock-output-names  = "clk-s-pix-hdmi",
+				      "clk-s-pix-dvo",
+				      "clk-s-out-dvo",
+				      "clk-s-pix-hd",
+				      "clk-s-hddac",
+				      "clk-s-denc",
+				      "clk-s-sddac",
+				      "clk-s-pix-main",
+				      "clk-s-pix-aux",
+				      "clk-s-stfe-frc-0",
+				      "clk-s-ref-mcru",
+				      "clk-s-slave-mcru",
+				      "clk-s-tmds-hdmi",
+				      "clk-s-hdmi-reject-pll",
+				      "clk-s-thsens";
 	};
 
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
index 49ec5ae..78978f1 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
@@ -24,60 +24,77 @@
 		quadfs_node {
 			...
 		};
+
+		mux_node {
+			...
+		};
+
+		vcc_node {
+			...
+		};
+
+		flexgen_node {
+			...
+		};
 		...
 	};
 
 This binding uses the common clock binding[1].
-Each subnode should use the binding discribe in [2]..[4]
+Each subnode should use the binding described in [2]..[8]
 
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/st,quadfs.txt
-[3] Documentation/devicetree/bindings/clock/st,quadfs.txt
-[4] Documentation/devicetree/bindings/clock/st,quadfs.txt
+[2] Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
+[3] Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
+[4] Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+[5] Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
+[6] Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
+[7] Documentation/devicetree/bindings/clock/st/st,quadfs.txt
+[8] Documentation/devicetree/bindings/clock/st/st,flexgen.txt
+
 
 Required properties:
 - reg : A Base address and length of the register set.
 
 Example:
 
-	clockgenA@fee62000 {
+	clockgen-a@fee62000 {
 
 		reg = <0xfee62000 0xb48>;
 
-		CLK_S_A0_PLL: CLK_S_A0_PLL {
+		clk_s_a0_pll: clk-s-a0-pll {
 			#clock-cells = <1>;
 			compatible = "st,clkgena-plls-c65";
 
-			clocks = <&CLK_SYSIN>;
+			clocks = <&clk_sysin>;
 
-			clock-output-names = "CLK_S_A0_PLL0_HS",
-					     "CLK_S_A0_PLL0_LS",
-					     "CLK_S_A0_PLL1";
+			clock-output-names = "clk-s-a0-pll0-hs",
+					     "clk-s-a0-pll0-ls",
+					     "clk-s-a0-pll1";
 		};
 
-		CLK_S_A0_OSC_PREDIV: CLK_S_A0_OSC_PREDIV {
+		clk_s_a0_osc_prediv: clk-s-a0-osc-prediv {
 			#clock-cells = <0>;
 			compatible = "st,clkgena-prediv-c65",
 				     "st,clkgena-prediv";
 
-			clocks = <&CLK_SYSIN>;
+			clocks = <&clk_sysin>;
 
-			clock-output-names = "CLK_S_A0_OSC_PREDIV";
+			clock-output-names = "clk-s-a0-osc-prediv";
 		};
 
-		CLK_S_A0_HS: CLK_S_A0_HS {
+		clk_s_a0_hs: clk-s-a0-hs {
 			#clock-cells = <1>;
 			compatible = "st,clkgena-divmux-c65-hs",
 				     "st,clkgena-divmux";
 
-			clocks = <&CLK_S_A0_OSC_PREDIV>,
-				 <&CLK_S_A0_PLL 0>, /* PLL0 HS */
-				 <&CLK_S_A0_PLL 2>; /* PLL1 */
+			clocks = <&clk_s_a0_osc_prediv>,
+				 <&clk_s_a0_pll 0>, /* pll0 hs */
+				 <&clk_s_a0_pll 2>; /* pll1 */
 
-			clock-output-names = "CLK_S_FDMA_0",
-					     "CLK_S_FDMA_1",
-					     ""; /* CLK_S_JIT_SENSE */
-					     /* Fourth output unused */
+			clock-output-names = "clk-s-fdma-0",
+					     "clk-s-fdma-1",
+					     ""; /* clk-s-jit-sense */
+					     /* fourth output unused */
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
new file mode 100644
index 0000000..1d3ace0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
@@ -0,0 +1,119 @@
+Binding for a type of flexgen structure found on certain
+STMicroelectronics consumer electronics SoC devices
+
+This structure includes:
+- a clock cross bar (represented by a mux element)
+- pre and final dividers (represented by divider and gate elements)
+
+The flexgen structure is part of Clockgen[1].
+
+Please find an example below:
+
+    Clockgen block diagram
+    -------------------------------------------------------------------
+   |                     Flexgen structure                             |
+   |                  ---------------------------------------------    |
+   |                 |    -------       --------       --------    |   |
+clk_sysin            |   |       |     |        |     |        |   |   |
+---|-----------------|-->|       |     |        |     |        |   |   |
+   | |               |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |Pre     |     |Final   |   |   |
+   | |  |PLL0   |    |   |       |     |Dividers|     |Dividers|   |   |
+   | |->|       |    |   |       |     |  x32   |     |  x32   |   |   |
+   | |  |  odf_0|----|-->|       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |        |     |        |   |   |
+   | |               |   |       |     |        |     |        |   |   |
+   | |   -------     |   | Clock |     |        |     |        |   |   |
+   | |  |PLL1   |    |   |       |     |        |     |        |   |   |
+   | |->|       |    |   | Cross |     |        |     |        |   |   |
+   | |  |  odf_0|----|-->|       |     |        |     |        | CLK_DIV[31:0]
+   | |  |       |    |   | Bar   |====>|        |====>|        |===|=========>
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |        |     |        |   |   |
+   | |               |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |        |     |        |   |   |
+   | |  |QUADFS |    |   |       |     |        |     |        |   |   |
+   | |->|    ch0|----|-->|       |     |        |     |        |   |   |
+   |    |       |    |   |       |     |        |     |        |   |   |
+   |    |    ch1|----|-->|       |     |        |     |        |   |   |
+   |    |       |    |   |       |     |        |     |        |   |   |
+   |    |    ch2|----|-->|       |     | DIV    |     | DIV    |   |   |
+   |    |       |    |   |       |     |  1 to  |     |  1 to  |   |   |
+   |    |    ch3|----|-->|       |     |   1024 |     |     64 |   |   |
+   |     -------     |   |       |     |        |     |        |   |   |
+   |                 |    -------       --------       --------    |   |
+   |                   --------------------------------------------    |
+   |                                                                   |
+    -------------------------------------------------------------------
+
+This binding uses the common clock binding[2].
+
+[1] Documentation/devicetree/bindings/clock/st/st,clkgen.txt
+[2] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be:
+  "st,flexgen"
+
+- #clock-cells : from common clock binding; shall be set to 1 (multiple clock
+  outputs).
+
+- clocks : must be set to the parents' phandles. These could be the output
+  clocks of a quadfs and/or a pll and/or clk_sysin (up to 7 clocks).
+
+- clock-output-names : List of strings used to name the clock outputs.
+
+Example:
+
+	clk_s_c0_flexgen: clk-s-c0-flexgen {
+
+		#clock-cells = <1>;
+		compatible = "st,flexgen";
+
+		clocks = <&clk_s_c0_pll0 0>,
+			 <&clk_s_c0_pll1 0>,
+			 <&clk_s_c0_quadfs 0>,
+			 <&clk_s_c0_quadfs 1>,
+			 <&clk_s_c0_quadfs 2>,
+			 <&clk_s_c0_quadfs 3>,
+			 <&clk_sysin>;
+
+		clock-output-names = "clk-icn-gpu",
+				     "clk-fdma",
+				     "clk-nand",
+				     "clk-hva",
+				     "clk-proc-stfe",
+				     "clk-proc-tp",
+				     "clk-rx-icn-dmu",
+				     "clk-rx-icn-hva",
+				     "clk-icn-cpu",
+				     "clk-tx-icn-dmu",
+				     "clk-mmc-0",
+				     "clk-mmc-1",
+				     "clk-jpegdec",
+				     "clk-ext2fa9",
+				     "clk-ic-bdisp-0",
+				     "clk-ic-bdisp-1",
+				     "clk-pp-dmu",
+				     "clk-vid-dmu",
+				     "clk-dss-lpc",
+				     "clk-st231-aud-0",
+				     "clk-st231-gp-1",
+				     "clk-st231-dmu",
+				     "clk-icn-lmi",
+				     "clk-tx-icn-disp-1",
+				     "clk-icn-sbc",
+				     "clk-stfe-frc2",
+				     "clk-eth-phy",
+				     "clk-eth-ref-phyclk",
+				     "clk-flash-promip",
+				     "clk-main-disp",
+				     "clk-aux-disp",
+				     "clk-compo-dvp";
+	};
diff --git a/Documentation/devicetree/bindings/clock/st/st,quadfs.txt b/Documentation/devicetree/bindings/clock/st/st,quadfs.txt
index ec86d62..cedeb9c 100644
--- a/Documentation/devicetree/bindings/clock/st/st,quadfs.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,quadfs.txt
@@ -15,6 +15,9 @@
   "st,stih416-quadfs432",	"st,quadfs"
   "st,stih416-quadfs660-E",	"st,quadfs"
   "st,stih416-quadfs660-F",	"st,quadfs"
+  "st,stih407-quadfs660-C",	"st,quadfs"
+  "st,stih407-quadfs660-D",	"st,quadfs"
+
 
 - #clock-cells : from common clock binding; shall be set to 1.
 
@@ -32,14 +35,14 @@
 
 Example:
 
-	CLOCKGEN_E: CLOCKGEN_E {
+	clockgen_e: clockgen-e@fd3208bc {
                 #clock-cells = <1>;
                 compatible = "st,stih416-quadfs660-E", "st,quadfs";
                 reg = <0xfd3208bc 0xB0>;
 
-                clocks = <&CLK_SYSIN>;
-                clock-output-names = "CLK_M_PIX_MDTP_0",
-                                        "CLK_M_PIX_MDTP_1",
-                                        "CLK_M_PIX_MDTP_2",
-                                        "CLK_M_MPELPC";
+                clocks = <&clk_sysin>;
+                clock-output-names = "clk-m-pix-mdtp-0",
+				     "clk-m-pix-mdtp-1",
+				     "clk-m-pix-mdtp-2",
+				     "clk-m-mpelpc";
         };
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index b9ec668..d3a5c3c 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -9,11 +9,13 @@
 	"allwinner,sun4i-a10-osc-clk" - for a gatable oscillator
 	"allwinner,sun4i-a10-pll1-clk" - for the main PLL clock and PLL4
 	"allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31
+	"allwinner,sun8i-a23-pll1-clk" - for the main PLL clock on A23
 	"allwinner,sun4i-a10-pll5-clk" - for the PLL5 clock
 	"allwinner,sun4i-a10-pll6-clk" - for the PLL6 clock
 	"allwinner,sun6i-a31-pll6-clk" - for the PLL6 clock on A31
 	"allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
 	"allwinner,sun4i-a10-axi-clk" - for the AXI clock
+	"allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
 	"allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
 	"allwinner,sun4i-a10-ahb-clk" - for the AHB clock
 	"allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10
@@ -23,13 +25,16 @@
 	"allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
 	"allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
 	"allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
+	"allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
 	"allwinner,sun4i-a10-apb0-clk" - for the APB0 clock
 	"allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
+	"allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
 	"allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
 	"allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
 	"allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
 	"allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
 	"allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
+	"allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
 	"allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
 	"allwinner,sun4i-a10-apb1-mux-clk" - for the APB1 clock muxing
 	"allwinner,sun4i-a10-apb1-gates-clk" - for the APB1 gates on A10
@@ -37,8 +42,10 @@
 	"allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s
 	"allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31
 	"allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20
+	"allwinner,sun8i-a23-apb1-gates-clk" - for the APB1 gates on A23
 	"allwinner,sun6i-a31-apb2-div-clk" - for the APB2 gates on A31
 	"allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
+	"allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
 	"allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
 	"allwinner,sun7i-a20-out-clk" - for the external output clocks
 	"allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
new file mode 100644
index 0000000..8c61183
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -0,0 +1,19 @@
+* AMD Cryptographic Coprocessor driver (ccp)
+
+Required properties:
+- compatible: Should be "amd,ccp-seattle-v1a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CCP interrupt
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+
+Example:
+	ccp@e0100000 {
+		compatible = "amd,ccp-seattle-v1a";
+		reg = <0 0xe0100000 0 0x10000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 3 4>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
new file mode 100644
index 0000000..fdd53b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
@@ -0,0 +1,25 @@
+Qualcomm crypto engine driver
+
+Required properties:
+
+- compatible  : should be "qcom,crypto-v5.1"
+- reg         : specifies base physical address and size of the registers map
+- clocks      : phandles to the clock controller plus clock-specifier pairs
+- clock-names : "iface" clocks register interface
+                "bus" clocks data transfer interface
+                "core" clocks rest of the crypto block
+- dmas        : DMA specifiers for tx and rx dma channels. For more see
+                Documentation/devicetree/bindings/dma/dma.txt
+- dma-names   : DMA request names should be "rx" and "tx"
+
+Example:
+	crypto@fd45a000 {
+		compatible = "qcom,crypto-v5.1";
+		reg = <0xfd45a000 0x6000>;
+		clocks = <&gcc GCC_CE2_AHB_CLK>,
+			 <&gcc GCC_CE2_AXI_CLK>,
+			 <&gcc GCC_CE2_CLK>;
+		clock-names = "iface", "bus", "core";
+		dmas = <&cryptobam 2>, <&cryptobam 3>;
+		dma-names = "rx", "tx";
+	};
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index d7e43fa..7e240a7 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -197,6 +197,7 @@
 					<mailto:gregkh@linuxfoundation.org>
 'a'	all	linux/atm*.h, linux/sonet.h	ATM on linux
 					<http://lrcwww.epfl.ch/>
+'a'	00-0F	drivers/crypto/qat/qat_common/adf_cfg_common.h	conflict! qat driver
 'b'	00-FF				conflict! bit3 vme host bridge
 					<mailto:natalia@nikhefk.nikhef.nl>
 'c'	all	linux/cm4000_cs.h	conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b7fa2f5..90f6139 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1097,6 +1097,12 @@
 			that can be changed at run time by the
 			set_graph_function file in the debugfs tracing directory.
 
+	ftrace_graph_notrace=[function-list]
+			[FTRACE] Do not trace from the functions specified in
+			function-list.  This comma-separated list of functions
+			can be changed at run time via the set_graph_notrace
+			file in the debugfs tracing directory.
+
 	gamecon.map[2|3]=
 			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
 			support via parallel port (up to 5 devices per port)
@@ -2807,6 +2813,13 @@
 			quiescent states.  Units are jiffies, minimum
 			value is one, and maximum value is HZ.
 
+	rcutree.rcu_nocb_leader_stride= [KNL]
+			Set the number of NOCB kthread groups, which
+			defaults to the square root of the number of
+			CPUs.  Larger numbers reduce the wakeup overhead
+			on the per-CPU grace-period kthreads, but increase
+			that same overhead on each group's leader.
+
 	rcutree.qhimark= [KNL]
 			Set threshold of queued RCU callbacks beyond which
 			batch limiting is disabled.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index f1dc4a2..a4de88f 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -757,10 +757,14 @@
 When dealing with CPU-CPU interactions, certain types of memory barrier should
 always be paired.  A lack of appropriate pairing is almost certainly an error.
 
-A write barrier should always be paired with a data dependency barrier or read
-barrier, though a general barrier would also be viable.  Similarly a read
-barrier or a data dependency barrier should always be paired with at least an
-write barrier, though, again, a general barrier is viable:
+General barriers pair with each other, though they also pair with
+most other types of barriers, albeit without transitivity.  An acquire
+barrier pairs with a release barrier, but both may also pair with other
+barriers, including of course general barriers.  A write barrier pairs
+with a data dependency barrier, an acquire barrier, a release barrier,
+a read barrier, or a general barrier.  Similarly a read barrier or a
+data dependency barrier pairs with a write barrier, an acquire barrier,
+a release barrier, or a general barrier:
 
 	CPU 1		      CPU 2
 	===============	      ===============
@@ -1893,6 +1897,21 @@
 	    <general barrier>		  STORE current->state
 	LOAD event_indicated
 
+To repeat, this write memory barrier is present if and only if something
+is actually awakened.  To see this, consider the following sequence of
+events, where X and Y are both initially zero:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	X = 1;				STORE event_indicated
+	smp_mb();			wake_up();
+	Y = 1;				wait_event(wq, Y == 1);
+	wake_up();			  load from Y sees 1, no memory barrier
+					load from X might see 0
+
+In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
+to see 1.
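
Rendered as pseudo-C in the style of the earlier examples (wq is an assumed
wait queue shared by both CPUs, and r is a CPU-local variable; this condenses
the sequence above):

	/* CPU 1 */
	X = 1;
	smp_mb();		/* orders the store to X before the store to Y */
	Y = 1;
	wake_up();		/* nobody is asleep: no write barrier implied */

	/* CPU 2 */
	wait_event(wq, Y == 1);	/* Y is already 1, so this returns without
				   sleeping and hence without any barrier */
	r = X;			/* may still observe 0 */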
+
 The available waker functions include:
 
 	complete();
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index 3f669b9..dd5f916 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -102,30 +102,6 @@
 EXPORT_SYMBOL(mcount);
 
 
-HAVE_FUNCTION_TRACE_MCOUNT_TEST
--------------------------------
-
-This is an optional optimization for the normal case when tracing is turned off
-in the system.  If you do not enable this Kconfig option, the common ftrace
-code will take care of doing the checking for you.
-
-To support this feature, you only need to check the function_trace_stop
-variable in the mcount function.  If it is non-zero, there is no tracing to be
-done at all, so you can return.
-
-This additional pseudo code would simply be:
-void mcount(void)
-{
-	/* save any bare state needed in order to do initial checking */
-
-+	if (function_trace_stop)
-+		return;
-
-	extern void (*ftrace_trace_function)(unsigned long, unsigned long);
-	if (ftrace_trace_function != ftrace_stub)
-...
-
-
 HAVE_FUNCTION_GRAPH_TRACER
 --------------------------
 
@@ -328,8 +304,6 @@
 
 void ftrace_caller(void)
 {
-	/* implement HAVE_FUNCTION_TRACE_MCOUNT_TEST if you desire */
-
 	/* save all state needed by the ABI (see paragraph above) */
 
 	unsigned long frompc = ...;
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 0fe3649..68cda1f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -297,6 +297,15 @@
 	__u64 rip, rflags;
 };
 
+/* mips */
+struct kvm_regs {
+	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+	__u64 gpr[32];
+	__u64 hi;
+	__u64 lo;
+	__u64 pc;
+};
+
 
 4.12 KVM_SET_REGS
 
@@ -378,7 +387,7 @@
 4.16 KVM_INTERRUPT
 
 Capability: basic
-Architectures: x86, ppc
+Architectures: x86, ppc, mips
 Type: vcpu ioctl
 Parameters: struct kvm_interrupt (in)
 Returns: 0 on success, -1 on error
@@ -423,6 +432,11 @@
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.
 
+MIPS:
+
+Queues an external interrupt to be injected into the virtual CPU. A negative
+interrupt number dequeues the interrupt.
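
As an illustrative userspace sketch of the MIPS semantics just described
(vcpu_fd and the interrupt number are assumptions, error handling is minimal):

	struct kvm_interrupt irq = { .irq = 3 };	/* assumed external line */

	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)	/* queue the interrupt */
		perror("KVM_INTERRUPT");
	irq.irq = -3;					/* negative: dequeue it */
	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);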
+
 
 4.17 KVM_DEBUG_GUEST
 
@@ -512,7 +526,7 @@
 4.21 KVM_SET_SIGNAL_MASK
 
 Capability: basic
-Architectures: x86
+Architectures: all
 Type: vcpu ioctl
 Parameters: struct kvm_signal_mask (in)
 Returns: 0 on success, -1 on error
@@ -974,7 +988,7 @@
 4.38 KVM_GET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64
+Architectures: x86, ia64, s390
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (out)
 Returns: 0 on success; -1 on error
@@ -988,24 +1002,32 @@
 
 Possible values are:
 
- - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running
+ - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86, ia64]
  - KVM_MP_STATE_UNINITIALIZED:   the vcpu is an application processor (AP)
-                                 which has not yet received an INIT signal
+                                 which has not yet received an INIT signal [x86,
+                                 ia64]
  - KVM_MP_STATE_INIT_RECEIVED:   the vcpu has received an INIT signal, and is
-                                 now ready for a SIPI
+                                 now ready for a SIPI [x86, ia64]
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
-                                 is waiting for an interrupt
+                                 is waiting for an interrupt [x86, ia64]
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accessible via KVM_GET_VCPU_EVENTS)
+                                 accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
+ - KVM_MP_STATE_STOPPED:         the vcpu is stopped [s390]
+ - KVM_MP_STATE_CHECK_STOP:      the vcpu is in a special error state [s390]
+ - KVM_MP_STATE_OPERATING:       the vcpu is operating (running or halted)
+                                 [s390]
+ - KVM_MP_STATE_LOAD:            the vcpu is in a special load/startup state
+                                 [s390]
 
-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
-irqchip, the multiprocessing state must be maintained by userspace.
+On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+in-kernel irqchip, the multiprocessing state must be maintained by userspace on
+these architectures.
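
A hedged sketch of reading the state from userspace (vcpu_fd is assumed to
come from KVM_CREATE_VCPU):

	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		perror("KVM_GET_MP_STATE");
	else if (mp.mp_state == KVM_MP_STATE_STOPPED)
		printf("vcpu is stopped (s390)\n");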
 
 
 4.39 KVM_SET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64
+Architectures: x86, ia64, s390
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (in)
 Returns: 0 on success; -1 on error
@@ -1013,8 +1035,9 @@
 Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
 arguments.
 
-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
-irqchip, the multiprocessing state must be maintained by userspace.
+On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+in-kernel irqchip, the multiprocessing state must be maintained by userspace on
+these architectures.
 
 
 4.40 KVM_SET_IDENTITY_MAP_ADDR
@@ -1774,122 +1797,151 @@
 and their own constants and width. To keep track of the implemented
 registers, find a list below:
 
-  Arch  |       Register        | Width (bits)
-        |                       |
-  PPC   | KVM_REG_PPC_HIOR      | 64
-  PPC   | KVM_REG_PPC_IAC1      | 64
-  PPC   | KVM_REG_PPC_IAC2      | 64
-  PPC   | KVM_REG_PPC_IAC3      | 64
-  PPC   | KVM_REG_PPC_IAC4      | 64
-  PPC   | KVM_REG_PPC_DAC1      | 64
-  PPC   | KVM_REG_PPC_DAC2      | 64
-  PPC   | KVM_REG_PPC_DABR      | 64
-  PPC   | KVM_REG_PPC_DSCR      | 64
-  PPC   | KVM_REG_PPC_PURR      | 64
-  PPC   | KVM_REG_PPC_SPURR     | 64
-  PPC   | KVM_REG_PPC_DAR       | 64
-  PPC   | KVM_REG_PPC_DSISR     | 32
-  PPC   | KVM_REG_PPC_AMR       | 64
-  PPC   | KVM_REG_PPC_UAMOR     | 64
-  PPC   | KVM_REG_PPC_MMCR0     | 64
-  PPC   | KVM_REG_PPC_MMCR1     | 64
-  PPC   | KVM_REG_PPC_MMCRA     | 64
-  PPC   | KVM_REG_PPC_MMCR2     | 64
-  PPC   | KVM_REG_PPC_MMCRS     | 64
-  PPC   | KVM_REG_PPC_SIAR      | 64
-  PPC   | KVM_REG_PPC_SDAR      | 64
-  PPC   | KVM_REG_PPC_SIER      | 64
-  PPC   | KVM_REG_PPC_PMC1      | 32
-  PPC   | KVM_REG_PPC_PMC2      | 32
-  PPC   | KVM_REG_PPC_PMC3      | 32
-  PPC   | KVM_REG_PPC_PMC4      | 32
-  PPC   | KVM_REG_PPC_PMC5      | 32
-  PPC   | KVM_REG_PPC_PMC6      | 32
-  PPC   | KVM_REG_PPC_PMC7      | 32
-  PPC   | KVM_REG_PPC_PMC8      | 32
-  PPC   | KVM_REG_PPC_FPR0      | 64
+  Arch  |           Register            | Width (bits)
+        |                               |
+  PPC   | KVM_REG_PPC_HIOR              | 64
+  PPC   | KVM_REG_PPC_IAC1              | 64
+  PPC   | KVM_REG_PPC_IAC2              | 64
+  PPC   | KVM_REG_PPC_IAC3              | 64
+  PPC   | KVM_REG_PPC_IAC4              | 64
+  PPC   | KVM_REG_PPC_DAC1              | 64
+  PPC   | KVM_REG_PPC_DAC2              | 64
+  PPC   | KVM_REG_PPC_DABR              | 64
+  PPC   | KVM_REG_PPC_DSCR              | 64
+  PPC   | KVM_REG_PPC_PURR              | 64
+  PPC   | KVM_REG_PPC_SPURR             | 64
+  PPC   | KVM_REG_PPC_DAR               | 64
+  PPC   | KVM_REG_PPC_DSISR             | 32
+  PPC   | KVM_REG_PPC_AMR               | 64
+  PPC   | KVM_REG_PPC_UAMOR             | 64
+  PPC   | KVM_REG_PPC_MMCR0             | 64
+  PPC   | KVM_REG_PPC_MMCR1             | 64
+  PPC   | KVM_REG_PPC_MMCRA             | 64
+  PPC   | KVM_REG_PPC_MMCR2             | 64
+  PPC   | KVM_REG_PPC_MMCRS             | 64
+  PPC   | KVM_REG_PPC_SIAR              | 64
+  PPC   | KVM_REG_PPC_SDAR              | 64
+  PPC   | KVM_REG_PPC_SIER              | 64
+  PPC   | KVM_REG_PPC_PMC1              | 32
+  PPC   | KVM_REG_PPC_PMC2              | 32
+  PPC   | KVM_REG_PPC_PMC3              | 32
+  PPC   | KVM_REG_PPC_PMC4              | 32
+  PPC   | KVM_REG_PPC_PMC5              | 32
+  PPC   | KVM_REG_PPC_PMC6              | 32
+  PPC   | KVM_REG_PPC_PMC7              | 32
+  PPC   | KVM_REG_PPC_PMC8              | 32
+  PPC   | KVM_REG_PPC_FPR0              | 64
           ...
-  PPC   | KVM_REG_PPC_FPR31     | 64
-  PPC   | KVM_REG_PPC_VR0       | 128
+  PPC   | KVM_REG_PPC_FPR31             | 64
+  PPC   | KVM_REG_PPC_VR0               | 128
           ...
-  PPC   | KVM_REG_PPC_VR31      | 128
-  PPC   | KVM_REG_PPC_VSR0      | 128
+  PPC   | KVM_REG_PPC_VR31              | 128
+  PPC   | KVM_REG_PPC_VSR0              | 128
           ...
-  PPC   | KVM_REG_PPC_VSR31     | 128
-  PPC   | KVM_REG_PPC_FPSCR     | 64
-  PPC   | KVM_REG_PPC_VSCR      | 32
-  PPC   | KVM_REG_PPC_VPA_ADDR  | 64
-  PPC   | KVM_REG_PPC_VPA_SLB   | 128
-  PPC   | KVM_REG_PPC_VPA_DTL   | 128
-  PPC   | KVM_REG_PPC_EPCR	| 32
-  PPC   | KVM_REG_PPC_EPR	| 32
-  PPC   | KVM_REG_PPC_TCR	| 32
-  PPC   | KVM_REG_PPC_TSR	| 32
-  PPC   | KVM_REG_PPC_OR_TSR	| 32
-  PPC   | KVM_REG_PPC_CLEAR_TSR	| 32
-  PPC   | KVM_REG_PPC_MAS0	| 32
-  PPC   | KVM_REG_PPC_MAS1	| 32
-  PPC   | KVM_REG_PPC_MAS2	| 64
-  PPC   | KVM_REG_PPC_MAS7_3	| 64
-  PPC   | KVM_REG_PPC_MAS4	| 32
-  PPC   | KVM_REG_PPC_MAS6	| 32
-  PPC   | KVM_REG_PPC_MMUCFG	| 32
-  PPC   | KVM_REG_PPC_TLB0CFG	| 32
-  PPC   | KVM_REG_PPC_TLB1CFG	| 32
-  PPC   | KVM_REG_PPC_TLB2CFG	| 32
-  PPC   | KVM_REG_PPC_TLB3CFG	| 32
-  PPC   | KVM_REG_PPC_TLB0PS	| 32
-  PPC   | KVM_REG_PPC_TLB1PS	| 32
-  PPC   | KVM_REG_PPC_TLB2PS	| 32
-  PPC   | KVM_REG_PPC_TLB3PS	| 32
-  PPC   | KVM_REG_PPC_EPTCFG	| 32
-  PPC   | KVM_REG_PPC_ICP_STATE | 64
-  PPC   | KVM_REG_PPC_TB_OFFSET	| 64
-  PPC   | KVM_REG_PPC_SPMC1	| 32
-  PPC   | KVM_REG_PPC_SPMC2	| 32
-  PPC   | KVM_REG_PPC_IAMR	| 64
-  PPC   | KVM_REG_PPC_TFHAR	| 64
-  PPC   | KVM_REG_PPC_TFIAR	| 64
-  PPC   | KVM_REG_PPC_TEXASR	| 64
-  PPC   | KVM_REG_PPC_FSCR	| 64
-  PPC   | KVM_REG_PPC_PSPB	| 32
-  PPC   | KVM_REG_PPC_EBBHR	| 64
-  PPC   | KVM_REG_PPC_EBBRR	| 64
-  PPC   | KVM_REG_PPC_BESCR	| 64
-  PPC   | KVM_REG_PPC_TAR	| 64
-  PPC   | KVM_REG_PPC_DPDES	| 64
-  PPC   | KVM_REG_PPC_DAWR	| 64
-  PPC   | KVM_REG_PPC_DAWRX	| 64
-  PPC   | KVM_REG_PPC_CIABR	| 64
-  PPC   | KVM_REG_PPC_IC	| 64
-  PPC   | KVM_REG_PPC_VTB	| 64
-  PPC   | KVM_REG_PPC_CSIGR	| 64
-  PPC   | KVM_REG_PPC_TACR	| 64
-  PPC   | KVM_REG_PPC_TCSCR	| 64
-  PPC   | KVM_REG_PPC_PID	| 64
-  PPC   | KVM_REG_PPC_ACOP	| 64
-  PPC   | KVM_REG_PPC_VRSAVE	| 32
-  PPC   | KVM_REG_PPC_LPCR	| 64
-  PPC   | KVM_REG_PPC_PPR	| 64
-  PPC   | KVM_REG_PPC_ARCH_COMPAT 32
-  PPC   | KVM_REG_PPC_DABRX     | 32
-  PPC   | KVM_REG_PPC_WORT      | 64
-  PPC   | KVM_REG_PPC_TM_GPR0	| 64
+  PPC   | KVM_REG_PPC_VSR31             | 128
+  PPC   | KVM_REG_PPC_FPSCR             | 64
+  PPC   | KVM_REG_PPC_VSCR              | 32
+  PPC   | KVM_REG_PPC_VPA_ADDR          | 64
+  PPC   | KVM_REG_PPC_VPA_SLB           | 128
+  PPC   | KVM_REG_PPC_VPA_DTL           | 128
+  PPC   | KVM_REG_PPC_EPCR              | 32
+  PPC   | KVM_REG_PPC_EPR               | 32
+  PPC   | KVM_REG_PPC_TCR               | 32
+  PPC   | KVM_REG_PPC_TSR               | 32
+  PPC   | KVM_REG_PPC_OR_TSR            | 32
+  PPC   | KVM_REG_PPC_CLEAR_TSR         | 32
+  PPC   | KVM_REG_PPC_MAS0              | 32
+  PPC   | KVM_REG_PPC_MAS1              | 32
+  PPC   | KVM_REG_PPC_MAS2              | 64
+  PPC   | KVM_REG_PPC_MAS7_3            | 64
+  PPC   | KVM_REG_PPC_MAS4              | 32
+  PPC   | KVM_REG_PPC_MAS6              | 32
+  PPC   | KVM_REG_PPC_MMUCFG            | 32
+  PPC   | KVM_REG_PPC_TLB0CFG           | 32
+  PPC   | KVM_REG_PPC_TLB1CFG           | 32
+  PPC   | KVM_REG_PPC_TLB2CFG           | 32
+  PPC   | KVM_REG_PPC_TLB3CFG           | 32
+  PPC   | KVM_REG_PPC_TLB0PS            | 32
+  PPC   | KVM_REG_PPC_TLB1PS            | 32
+  PPC   | KVM_REG_PPC_TLB2PS            | 32
+  PPC   | KVM_REG_PPC_TLB3PS            | 32
+  PPC   | KVM_REG_PPC_EPTCFG            | 32
+  PPC   | KVM_REG_PPC_ICP_STATE         | 64
+  PPC   | KVM_REG_PPC_TB_OFFSET         | 64
+  PPC   | KVM_REG_PPC_SPMC1             | 32
+  PPC   | KVM_REG_PPC_SPMC2             | 32
+  PPC   | KVM_REG_PPC_IAMR              | 64
+  PPC   | KVM_REG_PPC_TFHAR             | 64
+  PPC   | KVM_REG_PPC_TFIAR             | 64
+  PPC   | KVM_REG_PPC_TEXASR            | 64
+  PPC   | KVM_REG_PPC_FSCR              | 64
+  PPC   | KVM_REG_PPC_PSPB              | 32
+  PPC   | KVM_REG_PPC_EBBHR             | 64
+  PPC   | KVM_REG_PPC_EBBRR             | 64
+  PPC   | KVM_REG_PPC_BESCR             | 64
+  PPC   | KVM_REG_PPC_TAR               | 64
+  PPC   | KVM_REG_PPC_DPDES             | 64
+  PPC   | KVM_REG_PPC_DAWR              | 64
+  PPC   | KVM_REG_PPC_DAWRX             | 64
+  PPC   | KVM_REG_PPC_CIABR             | 64
+  PPC   | KVM_REG_PPC_IC                | 64
+  PPC   | KVM_REG_PPC_VTB               | 64
+  PPC   | KVM_REG_PPC_CSIGR             | 64
+  PPC   | KVM_REG_PPC_TACR              | 64
+  PPC   | KVM_REG_PPC_TCSCR             | 64
+  PPC   | KVM_REG_PPC_PID               | 64
+  PPC   | KVM_REG_PPC_ACOP              | 64
+  PPC   | KVM_REG_PPC_VRSAVE            | 32
+  PPC   | KVM_REG_PPC_LPCR              | 64
+  PPC   | KVM_REG_PPC_PPR               | 64
+  PPC   | KVM_REG_PPC_ARCH_COMPAT       | 32
+  PPC   | KVM_REG_PPC_DABRX             | 32
+  PPC   | KVM_REG_PPC_WORT              | 64
+  PPC   | KVM_REG_PPC_TM_GPR0           | 64
           ...
-  PPC   | KVM_REG_PPC_TM_GPR31	| 64
-  PPC   | KVM_REG_PPC_TM_VSR0	| 128
+  PPC   | KVM_REG_PPC_TM_GPR31          | 64
+  PPC   | KVM_REG_PPC_TM_VSR0           | 128
           ...
-  PPC   | KVM_REG_PPC_TM_VSR63	| 128
-  PPC   | KVM_REG_PPC_TM_CR	| 64
-  PPC   | KVM_REG_PPC_TM_LR	| 64
-  PPC   | KVM_REG_PPC_TM_CTR	| 64
-  PPC   | KVM_REG_PPC_TM_FPSCR	| 64
-  PPC   | KVM_REG_PPC_TM_AMR	| 64
-  PPC   | KVM_REG_PPC_TM_PPR	| 64
-  PPC   | KVM_REG_PPC_TM_VRSAVE	| 64
-  PPC   | KVM_REG_PPC_TM_VSCR	| 32
-  PPC   | KVM_REG_PPC_TM_DSCR	| 64
-  PPC   | KVM_REG_PPC_TM_TAR	| 64
+  PPC   | KVM_REG_PPC_TM_VSR63          | 128
+  PPC   | KVM_REG_PPC_TM_CR             | 64
+  PPC   | KVM_REG_PPC_TM_LR             | 64
+  PPC   | KVM_REG_PPC_TM_CTR            | 64
+  PPC   | KVM_REG_PPC_TM_FPSCR          | 64
+  PPC   | KVM_REG_PPC_TM_AMR            | 64
+  PPC   | KVM_REG_PPC_TM_PPR            | 64
+  PPC   | KVM_REG_PPC_TM_VRSAVE         | 64
+  PPC   | KVM_REG_PPC_TM_VSCR           | 32
+  PPC   | KVM_REG_PPC_TM_DSCR           | 64
+  PPC   | KVM_REG_PPC_TM_TAR            | 64
+        |                               |
+  MIPS  | KVM_REG_MIPS_R0               | 64
+          ...
+  MIPS  | KVM_REG_MIPS_R31              | 64
+  MIPS  | KVM_REG_MIPS_HI               | 64
+  MIPS  | KVM_REG_MIPS_LO               | 64
+  MIPS  | KVM_REG_MIPS_PC               | 64
+  MIPS  | KVM_REG_MIPS_CP0_INDEX        | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONTEXT      | 64
+  MIPS  | KVM_REG_MIPS_CP0_USERLOCAL    | 64
+  MIPS  | KVM_REG_MIPS_CP0_PAGEMASK     | 32
+  MIPS  | KVM_REG_MIPS_CP0_WIRED        | 32
+  MIPS  | KVM_REG_MIPS_CP0_HWRENA       | 32
+  MIPS  | KVM_REG_MIPS_CP0_BADVADDR     | 64
+  MIPS  | KVM_REG_MIPS_CP0_COUNT        | 32
+  MIPS  | KVM_REG_MIPS_CP0_ENTRYHI      | 64
+  MIPS  | KVM_REG_MIPS_CP0_COMPARE      | 32
+  MIPS  | KVM_REG_MIPS_CP0_STATUS       | 32
+  MIPS  | KVM_REG_MIPS_CP0_CAUSE        | 32
+  MIPS  | KVM_REG_MIPS_CP0_EPC          | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG       | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG1      | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG2      | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG3      | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG7      | 32
+  MIPS  | KVM_REG_MIPS_CP0_ERROREPC     | 64
+  MIPS  | KVM_REG_MIPS_COUNT_CTL        | 64
+  MIPS  | KVM_REG_MIPS_COUNT_RESUME     | 64
+  MIPS  | KVM_REG_MIPS_COUNT_HZ         | 64
 
 ARM registers are mapped using the lower 32 bits.  The upper 16 of that
 is the register group type, or coprocessor number:
@@ -1928,6 +1980,22 @@
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+
+MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
+the register group type:
+
+MIPS core registers (see above) have the following id bit patterns:
+  0x7030 0000 0000 <reg:16>
+
+MIPS CP0 registers (see KVM_REG_MIPS_CP0_* above) have the following id bit
+patterns depending on whether they're 32-bit or 64-bit registers:
+  0x7020 0000 0001 00 <reg:5> <sel:3>   (32-bit)
+  0x7030 0000 0001 00 <reg:5> <sel:3>   (64-bit)
+
+MIPS KVM control registers (see above) have the following id bit patterns:
+  0x7030 0000 0002 <reg:16>
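
For illustration, the 32-bit CP0 pattern composes as below; the helper name is
hypothetical, not from the kernel headers:

	/* sketch: build the 64-bit ONE_REG id of a 32-bit MIPS CP0 register */
	static inline __u64 mips_cp0_id32(unsigned int reg, unsigned int sel)
	{
		return 0x7020000000010000ULL | (reg << 3) | sel;
	}

	/* e.g. CP0_STATUS is (reg 12, sel 0) => 0x7020000000010060,
	   matching KVM_REG_MIPS_CP0_STATUS in the table above */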
+
+
 4.69 KVM_GET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
@@ -2415,7 +2483,7 @@
 4.84 KVM_GET_REG_LIST
 
 Capability: basic
-Architectures: arm, arm64
+Architectures: arm, arm64, mips
 Type: vcpu ioctl
 Parameters: struct kvm_reg_list (in/out)
 Returns: 0 on success; -1 on error
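
A hedged sketch of the usual two-call pattern (struct kvm_reg_list carries a
count n followed by a reg[] array of ids; vcpu_fd is assumed):

	struct kvm_reg_list probe = { .n = 0 }, *list;

	/* a first call with n == 0 fails with E2BIG and reports the count */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) < 0 && errno != E2BIG)
		err(1, "KVM_GET_REG_LIST");

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
		/* list->reg[0 .. list->n - 1] holds the ONE_REG ids */;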
@@ -2866,15 +2934,18 @@
 6. Capabilities that can be enabled
 -----------------------------------
 
-There are certain capabilities that change the behavior of the virtual CPU when
-enabled. To enable them, please see section 4.37. Below you can find a list of
-capabilities and what their effect on the vCPU is when enabling them.
+There are certain capabilities that change the behavior of the virtual CPU or
+the virtual machine when enabled. To enable them, please see section 4.37.
+Below is a list of capabilities and the effect each has on the vCPU or the
+virtual machine when enabled.
 
 The following information is provided along with the description:
 
   Architectures: which instruction set architectures provide this ioctl.
       x86 includes both i386 and x86_64.
 
+  Target: whether this is a per-vcpu or per-vm capability.
+
   Parameters: what parameters are accepted by the capability.
 
   Returns: the return value.  General error numbers (EBADF, ENOMEM, EINVAL)
@@ -2884,6 +2955,7 @@
 6.1 KVM_CAP_PPC_OSI
 
 Architectures: ppc
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2898,6 +2970,7 @@
 6.2 KVM_CAP_PPC_PAPR
 
 Architectures: ppc
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2917,6 +2990,7 @@
 6.3 KVM_CAP_SW_TLB
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] is the address of a struct kvm_config_tlb
 Returns: 0 on success; -1 on error
 
@@ -2959,6 +3033,7 @@
 6.4 KVM_CAP_S390_CSS_SUPPORT
 
 Architectures: s390
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2970,9 +3045,13 @@
 When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
 SUBCHANNEL intercepts.
 
+Note that even though this capability is enabled per-vcpu, the complete
+virtual machine is affected.
+
 6.5 KVM_CAP_PPC_EPR
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] defines whether the proxy facility is active
 Returns: 0 on success; -1 on error
 
@@ -2998,7 +3077,17 @@
 6.7 KVM_CAP_IRQ_XICS
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] is the XICS device fd
             args[1] is the XICS CPU number (server ID) for this vcpu
 
 This capability connects the vcpu to an in-kernel XICS device.
+
+6.8 KVM_CAP_S390_IRQCHIP
+
+Architectures: s390
+Target: vm
+Parameters: none
+
+This capability enables the in-kernel irqchip for s390. Please refer to
+"4.24 KVM_CREATE_IRQCHIP" for details.
diff --git a/MAINTAINERS b/MAINTAINERS
index 86efa7e..1acc624 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -70,6 +70,8 @@
 
 	P: Person (obsolete)
 	M: Mail patches to: FullName <address@domain>
+	R: Designated reviewer: FullName <address@domain>
+	   These reviewers should be CCed on patches.
 	L: Mailing list that is relevant to this area
 	W: Web-page with status/info
 	Q: Patchwork web based patch tracking system site
@@ -3350,6 +3352,13 @@
 S:	Maintained
 F:	drivers/edac/i82975x_edac.c
 
+EDAC-IE31200
+M:	Jason Baron <jbaron@akamai.com>
+L:	linux-edac@vger.kernel.org
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+F:	drivers/edac/ie31200_edac.c
+
 EDAC-MPC85XX
 M:	Johannes Thumshirn <johannes.thumshirn@men.de>
 L:	linux-edac@vger.kernel.org
@@ -7243,6 +7252,12 @@
 L:	rtc-linux@googlegroups.com
 S:	Maintained
 
+QAT DRIVER
+M:      Tadeusz Struk <tadeusz.struk@intel.com>
+L:      qat-linux@intel.com
+S:      Supported
+F:      drivers/crypto/qat/
+
 QIB DRIVER
 M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
@@ -7424,16 +7439,20 @@
 F:	drivers/net/wireless/ray*
 
 RCUTORTURE MODULE
-M:	Josh Triplett <josh@freedesktop.org>
+M:	Josh Triplett <josh@joshtriplett.org>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:	Documentation/RCU/torture.txt
-F:	kernel/rcu/torture.c
+F:	kernel/rcu/rcutorture.c
 
 RCUTORTURE TEST FRAMEWORK
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:	Josh Triplett <josh@joshtriplett.org>
+R:	Steven Rostedt <rostedt@goodmis.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+R:	Lai Jiangshan <laijs@cn.fujitsu.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -7456,8 +7475,11 @@
 F:	net/rds/
 
 READ-COPY UPDATE (RCU)
-M:	Dipankar Sarma <dipankar@in.ibm.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:	Josh Triplett <josh@joshtriplett.org>
+R:	Steven Rostedt <rostedt@goodmis.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+R:	Lai Jiangshan <laijs@cn.fujitsu.com>
 L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
@@ -7467,7 +7489,7 @@
 F:	include/linux/rcu*
 X:	include/linux/srcu.h
 F:	kernel/rcu/
-X:	kernel/rcu/torture.c
+X:	kernel/torture.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
@@ -8250,6 +8272,9 @@
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:	Lai Jiangshan <laijs@cn.fujitsu.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:	Josh Triplett <josh@joshtriplett.org>
+R:	Steven Rostedt <rostedt@goodmis.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
@@ -8922,7 +8947,7 @@
 M:	Thierry Reding <thierry.reding@gmail.com>
 L:	linux-tegra@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/linux-tegra/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
 S:	Supported
 N:	[^a-z]tegra
 
@@ -9044,6 +9069,13 @@
 S:	Supported
 F:	drivers/thermal/ti-soc-thermal/
 
+TI CLOCK DRIVER
+M:	Tero Kristo <t-kristo@ti.com>
+L:	linux-omap@vger.kernel.org
+S:	Maintained
+F:	drivers/clk/ti/
+F:	include/linux/clk/ti.h
+
 TI FLASH MEDIA INTERFACE DRIVER
 M:	Alex Dubov <oakad@yahoo.com>
 S:	Maintained
diff --git a/Makefile b/Makefile
index f6a7794..d0901b4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 6cb7fe8..b4cf036 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -57,6 +57,7 @@
   ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index d99f9b3..82588f3 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -62,6 +62,8 @@
 #define cpu_relax()	do { } while (0)
 #endif
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 #define copy_segments(tsk, mm)      do { } while (0)
 #define release_segments(mm)        do { } while (0)
 
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 63177e4..b9a5685 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -99,10 +99,6 @@
 	struct hw_perf_event *hwc = &event->hw;
 	int ret;
 
-	/* ARC 700 PMU does not support sampling events */
-	if (is_sampling_event(event))
-		return -ENOENT;
-
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		if (event->attr.config >= PERF_COUNT_HW_MAX)
@@ -298,6 +294,9 @@
 		.read		= arc_pmu_read,
 	};
 
+	/* ARC 700 PMU does not support sampling events */
+	arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 
 	return ret;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 88acf8b..290f02ee 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -313,7 +313,7 @@
 config ARCH_INTEGRATOR
 	bool "ARM Ltd. Integrator family"
 	select ARM_AMBA
-	select ARM_PATCH_PHYS_VIRT
+	select ARM_PATCH_PHYS_VIRT if MMU
 	select AUTO_ZRELADDR
 	select COMMON_CLK
 	select COMMON_CLK_VERSATILE
@@ -659,7 +659,7 @@
 config ARCH_SHMOBILE_LEGACY
 	bool "Renesas ARM SoCs (non-multiplatform)"
 	select ARCH_SHMOBILE
-	select ARM_PATCH_PHYS_VIRT
+	select ARM_PATCH_PHYS_VIRT if MMU
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU if SMP
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index ee3001f..97ea7a9 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -31,6 +31,16 @@
 		pinctrl2 = &pinctrl_2;
 	};
 
+	pmu_system_controller: system-controller@10020000 {
+		clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+				"clkout4", "clkout8", "clkout9";
+		clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+			<&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+			<&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+			<&clock CLK_XUSBXTI>;
+		#clock-cells = <1>;
+	};
+
 	sysram@02020000 {
 		compatible = "mmio-sram";
 		reg = <0x02020000 0x20000>;
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index c5a943d..de1f9c7 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -139,6 +139,13 @@
 
 	pmu_system_controller: system-controller@10020000 {
 		compatible = "samsung,exynos4212-pmu", "syscon";
+		clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+				"clkout4", "clkout8", "clkout9";
+		clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+			<&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+			<&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+			<&clock CLK_XUSBXTI>;
+		#clock-cells = <1>;
 	};
 
 	g2d@10800000 {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 834fb5a..492e1ef 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -191,6 +191,9 @@
 	pmu_system_controller: system-controller@10040000 {
 		compatible = "samsung,exynos5250-pmu", "syscon";
 		reg = <0x10040000 0x5000>;
+		clock-names = "clkout16";
+		clocks = <&clock CLK_FIN_PLL>;
+		#clock-cells = <1>;
 	};
 
 	sysreg_system_controller: syscon@10050000 {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 1595722..a40a5c2 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -727,6 +727,9 @@
 	pmu_system_controller: system-controller@10040000 {
 		compatible = "samsung,exynos5420-pmu", "syscon";
 		reg = <0x10040000 0x5000>;
+		clock-names = "clkout16";
+		clocks = <&clock CLK_FIN_PLL>;
+		#clock-cells = <1>;
 	};
 
 	sysreg_system_controller: syscon@10050000 {
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d..83a5b86 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
 
 		L2: l2-cache {
 			compatible = "arm,pl310-cache";
-			reg = <0xfc10000 0x100000>;
+			reg = <0x100000 0x100000>;
 			interrupts = <0 15 4>;
 			cache-unified;
 			cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1..b15f1a7 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
 	};
 
 	twl_power: power {
-		compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+		compatible = "ti,twl4030-power-n900";
 		ti,use_poweroff;
 	};
 };
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffae..79f68ac 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
 			#clock-cells = <0>;
 			clock-output-names = "sd1";
 		};
-		sd2_clk: sd3_clk@e615007c {
+		sd2_clk: sd3_clk@e615026c {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-			reg = <0 0xe615007c 0 4>;
+			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-output-names = "sd2";
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index f557feb..90d8b6c 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-/include/ "ste-nomadik-stn8815.dtsi"
+#include "ste-nomadik-stn8815.dtsi"
 
 / {
 	model = "Calao Systems USB-S8815";
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d316c95..dbcf521 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -1,7 +1,9 @@
 /*
  * Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC
  */
-/include/ "skeleton.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
 
 / {
 	#address-cells = <1>;
@@ -842,8 +844,7 @@
 			bus-width = <4>;
 			cap-mmc-highspeed;
 			cap-sd-highspeed;
-			cd-gpios = <&gpio3 15 0x1>;
-			cd-inverted;
+			cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
 			vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366..15468fb 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@
 				dst += AES_BLOCK_SIZE;
 			} while (--blocks);
 		}
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -158,7 +158,7 @@
 		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	while (walk.nbytes) {
 		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@
 			dst += AES_BLOCK_SIZE;
 			src += AES_BLOCK_SIZE;
 		} while (--blocks);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -268,7 +268,7 @@
 		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->enc, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -292,7 +292,7 @@
 		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 060a75e..0406cb3 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -50,6 +50,7 @@
 	struct smp_operations	*smp;		/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
+	void			(*dt_fixup)(void);
 	void			(*init_meminfo)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index c3d5fc1..8a1e8e9 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -82,6 +82,8 @@
 #define cpu_relax()			barrier()
 #endif
 
+#define cpu_relax_lowlatency()                cpu_relax()
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index e94a157..11c54de 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@
 	mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
 		return NULL;
 
 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@
 		dump_machine_table(); /* does not return */
 	}
 
+	/* We really don't want to do this, but sometimes firmware provides buggy data */
+	if (mdesc->dt_fixup)
+		mdesc->dt_fixup();
+
+	early_init_dt_scan_nodes();
+
 	/* Change machine number to match the mdesc we're using */
 	__machine_arch_type = mdesc->nr;
 
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cf..2b32978 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -94,13 +94,19 @@
 
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
+	bl	concan_save
 
-	teq	r1, #0				@ test for last ownership
-	mov	lr, r9				@ normal exit from exception
-	beq	concan_load			@ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info r10
+#endif
+4:	dec_preempt_count r10, r3
+	mov	pc, r9				@ normal exit from exception
 
 concan_save:
 
+	teq	r1, #0				@ test for last ownership
+	beq	concan_load			@ no owner, skip save
+
 	tmrc	r2, wCon
 
 	@ CUP? wCx
@@ -138,7 +144,7 @@
 	wstrd	wR15, [r1, #MMX_WR15]
 
 2:	teq	r0, #0				@ anything to load?
-	beq	3f
+	moveq	pc, lr				@ if not, return
 
 concan_load:
 
@@ -171,14 +177,9 @@
 	@ clear CUP/MUP (only if r1 != 0)
 	teq	r1, #0
 	mov 	r2, #0
-	beq	3f
-	tmcr	wCon, r2
+	moveq	pc, lr
 
-3:
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-#endif
-4:	dec_preempt_count r10, r3
+	tmcr	wCon, r2
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7..a74b53c 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -160,12 +160,16 @@
 static struct undef_hook kgdb_brkpt_hook = {
 	.instr_mask		= 0xffffffff,
 	.instr_val		= KGDB_BREAKINST,
+	.cpsr_mask		= MODE_MASK,
+	.cpsr_val		= SVC_MODE,
 	.fn			= kgdb_brk_fn
 };
 
 static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.instr_mask		= 0xffffffff,
 	.instr_val		= KGDB_COMPILED_BREAK,
+	.cpsr_mask		= MODE_MASK,
+	.cpsr_val		= SVC_MODE,
 	.fn			= kgdb_compiled_brk_fn
 };
 
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 46d893f..66c9b96 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -335,6 +335,15 @@
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+	/*
+	 * Some versions of U-Boot pass garbage entries in the memory node,
+	 * so fall back to the old CONFIG_ARM_NR_BANKS limit of eight banks.
+	 */
+	of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 	/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
 	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -348,4 +357,5 @@
 	.dt_compat	= exynos_dt_compat,
 	.restart	= exynos_restart,
 	.reserve	= exynos_reserve,
+	.dt_fixup	= exynos_dt_fixup,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd393..93914d2 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@
 		 soc_is_omap54xx() || soc_is_dra7xx())
 		return 1;
 
+	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+		 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+		if (cpu_is_omap24xx())
+			return 0;
+		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+			return 0;
+		else
+			return 1;
+	}
+
 	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
 	 * which require H/W based ECC error detection */
 	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@
 		 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
 		return 0;
 
-	/*
-	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-	 * and AM33xx derivates. Other chips may be added if confirmed to work.
-	 */
-	if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-		return 0;
-
 	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
 	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
 		return 1;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e810..a0fe747 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@
 		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
 		break;
 
+	case L310_POWER_CTRL:
+		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+		return;
+
 	default:
 		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
 		return;
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index 1caee6d..e4564c2 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -2,6 +2,7 @@
 	bool "Rockchip RK2928 and RK3xxx SOCs" if ARCH_MULTI_V7
 	select PINCTRL
 	select PINCTRL_ROCKCHIP
+	select ARCH_HAS_RESET_CONTROLLER
 	select ARCH_REQUIRE_GPIOLIB
 	select ARM_GIC
 	select CACHE_L2X0
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4c88935..1f88db0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -461,12 +461,21 @@
 		map.type = MT_MEMORY_DMA_READY;
 
 		/*
-		 * Clear previous low-memory mapping
+		 * Clear previous low-memory mapping to ensure that the
+		 * TLB does not see any conflicting entries, then flush
+		 * the TLB of the old entries before creating new mappings.
+		 *
+		 * This ensures that any speculatively loaded TLB entries
+		 * (even though they may be rare) cannot cause any problems,
+		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
 		     addr += PMD_SIZE)
 			pmd_clear(pmd_off_k(addr));
 
+		flush_tlb_kernel_range(__phys_to_virt(start),
+				       __phys_to_virt(end));
+
 		iotable_init(&map, 1);
 	}
 }
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 8e0e52e..c447ec7 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -9,6 +9,11 @@
 #include <asm/sections.h>
 #include <asm/system_info.h>
 
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
 pgd_t *idmap_pgd;
 phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
@@ -25,6 +30,13 @@
 			pr_warning("Failed to allocate identity pmd.\n");
 			return;
 		}
+		/*
+		 * Copy the original PMD to ensure that the PMD entries for
+		 * the kernel image are preserved.
+		 */
+		if (!pud_none(*pud))
+			memcpy(pmd, pmd_offset(pud, 0),
+			       PTRS_PER_PMD * sizeof(pmd_t));
 		pud_populate(&init_mm, pud, pmd);
 		pmd += pmd_index(addr);
 	} else
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79..6e3ba8d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@
 		return;
 
 	/* remap kernel code and data */
-	map_start = init_mm.start_code;
-	map_end   = init_mm.brk;
+	map_start = init_mm.start_code & PMD_MASK;
+	map_end   = ALIGN(init_mm.brk, PMD_SIZE);
 
 	/* get a handle on things... */
 	pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@
 	}
 
 	/* remap pmds for kernel mapping */
-	phys = __pa(map_start) & PMD_MASK;
+	phys = __pa(map_start);
 	do {
 		*pmdk++ = __pmd(phys | pmdprot);
 		phys += PMD_SIZE;
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index 859a9bb..91cf08b 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@
 {
 	return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	return 0;
+}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 839f48c..f3b584b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,7 +1,6 @@
 config ARM64
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select ARCH_HAS_OPP
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_SUPPORTS_ATOMIC_RMW
@@ -11,6 +10,8 @@
 	select ARM_AMBA
 	select ARM_ARCH_TIMER
 	select ARM_GIC
+	select AUDIT_ARCH_COMPAT_GENERIC
+	select ARM_GIC_V3
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
@@ -29,10 +30,12 @@
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
+	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -63,6 +66,7 @@
 	select RTC_LIB
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select HAVE_CONTEXT_TRACKING
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -155,14 +159,63 @@
 
 menu "Kernel Features"
 
+choice
+	prompt "Page size"
+	default ARM64_4K_PAGES
+	help
+	  Page size (translation granule) configuration.
+
+config ARM64_4K_PAGES
+	bool "4KB"
+	help
+	  This feature enables 4KB pages support.
+
 config ARM64_64K_PAGES
-	bool "Enable 64KB pages support"
+	bool "64KB"
 	help
 	  This feature enables 64KB pages support (4KB by default)
 	  allowing only two levels of page tables and faster TLB
 	  look-up. AArch32 emulation is not available when this feature
 	  is enabled.
 
+endchoice
+
+choice
+	prompt "Virtual address space size"
+	default ARM64_VA_BITS_39 if ARM64_4K_PAGES
+	default ARM64_VA_BITS_42 if ARM64_64K_PAGES
+	help
+	  Allows choosing one of multiple possible virtual address
+	  space sizes. The number of translation table levels is determined
+	  by a combination of page size and virtual address space size.
+
+config ARM64_VA_BITS_39
+	bool "39-bit"
+	depends on ARM64_4K_PAGES
+
+config ARM64_VA_BITS_42
+	bool "42-bit"
+	depends on ARM64_64K_PAGES
+
+config ARM64_VA_BITS_48
+	bool "48-bit"
+	depends on BROKEN
+
+endchoice
+
+config ARM64_VA_BITS
+	int
+	default 39 if ARM64_VA_BITS_39
+	default 42 if ARM64_VA_BITS_42
+	default 48 if ARM64_VA_BITS_48
+
+config ARM64_PGTABLE_LEVELS
+	int
+	default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
+	default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
+	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
+	default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
+
 config CPU_BIG_ENDIAN
        bool "Build big-endian kernel"
        help
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1c1b756..4ee8e90 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -28,4 +28,19 @@
 	  instructions during context switch. Say Y here only if you are
 	  planning to use hardware trace tools with this kernel.
 
+config ARM64_RANDOMIZE_TEXT_OFFSET
+	bool "Randomize TEXT_OFFSET at build time"
+	help
+	  Say Y here if you want the image load offset (AKA TEXT_OFFSET)
+	  of the kernel to be randomized at build time. When selected,
+	  this option will cause TEXT_OFFSET to be randomized upon any
+	  build of the kernel, and the offset will be reflected in the
+	  text_offset field of the resulting Image. This can be used to
+	  fuzz-test bootloaders which respect text_offset.
+
+	  This option is intended for bootloader and/or kernel testing
+	  only. Bootloaders must make no assumptions regarding the value
+	  of TEXT_OFFSET and platforms must not require a specific
+	  value.
+
 endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 8185a91..e8d025c 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -38,7 +38,11 @@
 head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
+ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+else
 TEXT_OFFSET := 0x00080000
+endif
 
 export	TEXT_OFFSET GZFLAGS
 
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 3421f31..1e52b74 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -52,8 +52,11 @@
 # CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
@@ -65,6 +68,7 @@
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
@@ -76,6 +80,7 @@
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_REGULATOR=y
@@ -90,6 +95,7 @@
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
+CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
@@ -107,6 +113,7 @@
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_VIRTUALIZATION=y
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 2070a56..a3f935f 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -35,4 +35,4 @@
 CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
 
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
-	$(call if_changed_dep,cc_o_c)
+	$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c..79cd911 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -128,7 +128,7 @@
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -151,7 +151,7 @@
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -174,7 +174,7 @@
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -243,7 +243,7 @@
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
@@ -267,7 +267,7 @@
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index a5176cf..f2defe1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -138,19 +138,10 @@
 #define flush_icache_page(vma,page)	do { } while (0)
 
 /*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
- * caches, since the direct-mappings of these pages may contain cached
- * data, we need to do a full cache flush to ensure that writebacks
- * don't corrupt data placed into these pages via the new mappings.
+ * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
  */
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
-	/*
-	 * set_pte_at() called from vmap_pte_range() does not
-	 * have a DSB after cleaning the cache line.
-	 */
-	dsb(ish);
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 4b23e75..7a2e076 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -30,10 +30,14 @@
 
 #ifndef __ASSEMBLY__
 
-static inline u32 icache_policy(void)
-{
-	return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
-}
+#include <linux/bitops.h>
+
+#define CTR_L1IP(ctr)	(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+#define ICACHEF_ALIASING	BIT(0)
+#define ICACHEF_AIVIVT		BIT(1)
+
+extern unsigned long __icache_flags;
 
 /*
  * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
@@ -41,12 +45,12 @@
  */
 static inline int icache_is_aliasing(void)
 {
-	return icache_policy() != ICACHE_POLICY_PIPT;
+	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
 static inline int icache_is_aivivt(void)
 {
-	return icache_policy() == ICACHE_POLICY_AIVIVT;
+	return test_bit(ICACHEF_AIVIVT, &__icache_flags);
 }
 
 static inline u32 cache_type_cwg(void)
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
new file mode 100644
index 0000000..0564430
--- /dev/null
+++ b/arch/arm64/include/asm/cpu.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_H
+#define __ASM_CPU_H
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+
+/*
+ * Records attributes of an individual CPU.
+ */
+struct cpuinfo_arm64 {
+	struct cpu	cpu;
+	u32		reg_ctr;
+	u32		reg_cntfrq;
+	u32		reg_dczid;
+	u32		reg_midr;
+
+	u64		reg_id_aa64isar0;
+	u64		reg_id_aa64isar1;
+	u64		reg_id_aa64mmfr0;
+	u64		reg_id_aa64mmfr1;
+	u64		reg_id_aa64pfr0;
+	u64		reg_id_aa64pfr1;
+
+	u32		reg_id_isar0;
+	u32		reg_id_isar1;
+	u32		reg_id_isar2;
+	u32		reg_id_isar3;
+	u32		reg_id_isar4;
+	u32		reg_id_isar5;
+	u32		reg_id_mmfr0;
+	u32		reg_id_mmfr1;
+	u32		reg_id_mmfr2;
+	u32		reg_id_mmfr3;
+	u32		reg_id_pfr0;
+	u32		reg_id_pfr1;
+};
+
+DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+
+void cpuinfo_store_cpu(void);
+void __init cpuinfo_store_boot_cpu(void);
+
+#endif /* __ASM_CPU_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 27f54a7..379d0b8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -18,6 +18,8 @@
 
 #define INVALID_HWID		ULONG_MAX
 
+#define MPIDR_UP_BITMASK	(0x1 << 30)
+#define MPIDR_MT_BITMASK	(0x1 << 24)
 #define MPIDR_HWID_BITMASK	0xff00ffffff
 
 #define MPIDR_LEVEL_BITS_SHIFT	3
@@ -36,15 +38,34 @@
 	__val;								\
 })
 
+#define MIDR_REVISION_MASK	0xf
+#define MIDR_REVISION(midr)	((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT	4
+#define MIDR_PARTNUM_MASK	(0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr)	\
+	(((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT	16
+#define MIDR_ARCHITECTURE_MASK	(0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr)	\
+	(((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT	20
+#define MIDR_VARIANT_MASK	(0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr)	\
+	(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT	24
+#define MIDR_IMPLEMENTOR_MASK	(0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr)	\
+	(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
 #define ARM_CPU_IMP_ARM		0x41
 #define ARM_CPU_IMP_APM		0x50
 
-#define ARM_CPU_PART_AEM_V8	0xD0F0
-#define ARM_CPU_PART_FOUNDATION	0xD000
-#define ARM_CPU_PART_CORTEX_A53	0xD030
-#define ARM_CPU_PART_CORTEX_A57	0xD070
+#define ARM_CPU_PART_AEM_V8	0xD0F
+#define ARM_CPU_PART_FOUNDATION	0xD00
+#define ARM_CPU_PART_CORTEX_A57	0xD07
+#define ARM_CPU_PART_CORTEX_A53	0xD03
 
-#define APM_CPU_PART_POTENZA	0x0000
+#define APM_CPU_PART_POTENZA	0x000
 
 #ifndef __ASSEMBLY__
 
@@ -65,12 +86,12 @@
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
-	return (read_cpuid_id() & 0xFF000000) >> 24;
+	return MIDR_IMPLEMENTOR(read_cpuid_id());
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
 {
-	return (read_cpuid_id() & 0xFFF0);
+	return MIDR_PARTNUM(read_cpuid_id());
 }
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 768414d..007618b 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -40,6 +40,19 @@
 	str	w\tmpnr, [\state, #16 * 2 + 4]
 .endm
 
+.macro fpsimd_restore_fpcr state, tmp
+	/*
+	 * Writes to fpcr may be self-synchronising, so avoid restoring
+	 * the register if it hasn't changed.
+	 */
+	mrs	\tmp, fpcr
+	cmp	\tmp, \state
+	b.eq	9999f
+	msr	fpcr, \state
+9999:
+.endm
+
+/* Clobbers \state */
 .macro fpsimd_restore state, tmpnr
 	ldp	q0, q1, [\state, #16 * 0]
 	ldp	q2, q3, [\state, #16 * 2]
@@ -60,7 +73,7 @@
 	ldr	w\tmpnr, [\state, #16 * 2]
 	msr	fpsr, x\tmpnr
 	ldr	w\tmpnr, [\state, #16 * 2 + 4]
-	msr	fpcr, x\tmpnr
+	fpsimd_restore_fpcr x\tmpnr, \state
 .endm
 
 .altmacro
@@ -84,7 +97,7 @@
 .macro fpsimd_restore_partial state, tmpnr1, tmpnr2
 	ldp	w\tmpnr1, w\tmpnr2, [\state]
 	msr	fpsr, x\tmpnr1
-	msr	fpcr, x\tmpnr2
+	fpsimd_restore_fpcr x\tmpnr2, x\tmpnr1
 	adr	x\tmpnr1, 0f
 	ldr	w\tmpnr2, [\state, #8]
 	add	\state, \state, x\tmpnr2, lsl #4
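
The fpcr macro is a read-compare-write: since writes to fpcr may be
self-synchronising (and therefore expensive), the restore is skipped when
the register already holds the target value. A C-level sketch of the same
pattern, purely illustrative (read_fpcr()/write_fpcr() are hypothetical
helpers; the patch implements this in assembly):

	static inline void write_fpcr_lazy(unsigned long new)
	{
		unsigned long cur = read_fpcr();

		if (cur != new)			/* skip the potentially     */
			write_fpcr(new);	/* self-synchronising write */
	}
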
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 902eb70..ccc7087 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -41,11 +41,7 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define VA_BITS			(42)
-#else
-#define VA_BITS			(39)
-#endif
+#define VA_BITS			(CONFIG_ARM64_VA_BITS)
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
 #define MODULES_END		(PAGE_OFFSET)
 #define MODULES_VADDR		(MODULES_END - SZ_64M)
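
PAGE_OFFSET still marks the start of the linear mapping, half-way through
the kernel VA range, so it now follows directly from the configured VA
size; worked values:

	VA_BITS = 39: PAGE_OFFSET = 0xffffffffffffffff << 38 = 0xffffffc000000000
	VA_BITS = 42: PAGE_OFFSET = 0xffffffffffffffff << 41 = 0xfffffe0000000000
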
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 46bf666..7a3f462 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -31,13 +31,25 @@
 /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
 #define __HAVE_ARCH_GATE_AREA		1
 
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
+ * map the kernel. With the 64K page configuration, swapper and idmap need to
+ * map to pte level. The swapper also maps the FDT (see __create_page_tables
+ * for more information).
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define SWAPPER_PGTABLE_LEVELS	(CONFIG_ARM64_PGTABLE_LEVELS)
+#else
+#define SWAPPER_PGTABLE_LEVELS	(CONFIG_ARM64_PGTABLE_LEVELS - 1)
+#endif
+
+#define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
+#define IDMAP_DIR_SIZE		(SWAPPER_DIR_SIZE)
+
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_ARM64_64K_PAGES
-#include <asm/pgtable-2level-types.h>
-#else
-#include <asm/pgtable-3level-types.h>
-#endif
+#include <asm/pgtable-types.h>
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
 extern void __cpu_copy_user_page(void *to, const void *from,
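
Worked values for the reserved page-table space (a sketch; exact figures
follow the chosen page size and CONFIG_ARM64_PGTABLE_LEVELS):

	4K pages,  3 levels: SWAPPER_PGTABLE_LEVELS = 3 - 1 = 2
	                     (pgd + pmd, kernel section-mapped at pmd) ->   8K
	4K pages,  4 levels: 4 - 1 = 3 (pgd + pud + pmd)               ->  12K
	64K pages, 2 levels: 2 (pgd + pte, mapped down to pte level)   -> 128K
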
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 9bea6e7..d5bed02 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()		do { } while (0)
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -44,7 +44,27 @@
 	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }
 
-#endif	/* CONFIG_ARM64_64K_PAGES */
+#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+	free_page((unsigned long)pud);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+}
+
+#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
deleted file mode 100644
index 2593b49..0000000
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_2LEVEL_HWDEF_H
-#define __ASM_PGTABLE_2LEVEL_HWDEF_H
-
-/*
- * With LPAE and 64KB pages, there are 2 levels of page tables. Each level has
- * 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
- * used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
- * entry representing 512MB. The user and kernel address spaces are limited to
- * 4TB in the 64KB page configuration.
- */
-#define PTRS_PER_PTE		8192
-#define PTRS_PER_PGD		8192
-
-/*
- * PGDIR_SHIFT determines the size a top-level page table entry can map.
- */
-#define PGDIR_SHIFT		29
-#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT		29
-#define SECTION_SIZE		(_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK		(~(SECTION_SIZE-1))
-
-#endif
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
deleted file mode 100644
index 5f101e6..0000000
--- a/arch/arm64/include/asm/pgtable-2level-types.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
-#define __ASM_PGTABLE_2LEVEL_TYPES_H
-
-#include <asm/types.h>
-
-typedef u64 pteval_t;
-typedef u64 pgdval_t;
-typedef pgdval_t pmdval_t;
-
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { pteval_t pte; } pte_t;
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pteval_t pgprot; } pgprot_t;
-
-#define pte_val(x)      ((x).pte)
-#define pgd_val(x)	((x).pgd)
-#define pgprot_val(x)   ((x).pgprot)
-
-#define __pte(x)        ((pte_t) { (x) } )
-#define __pgd(x)	((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
-
-#else	/* !STRICT_MM_TYPECHECKS */
-
-typedef pteval_t pte_t;
-typedef pgdval_t pgd_t;
-typedef pteval_t pgprot_t;
-
-#define pte_val(x)	(x)
-#define pgd_val(x)	(x)
-#define pgprot_val(x)	(x)
-
-#define __pte(x)	(x)
-#define __pgd(x)	(x)
-#define __pgprot(x)	(x)
-
-#endif	/* STRICT_MM_TYPECHECKS */
-
-#include <asm-generic/pgtable-nopmd.h>
-
-#endif	/* __ASM_PGTABLE_2LEVEL_TYPES_H */
diff --git a/arch/arm64/include/asm/pgtable-3level-hwdef.h b/arch/arm64/include/asm/pgtable-3level-hwdef.h
deleted file mode 100644
index 3dbf941..0000000
--- a/arch/arm64/include/asm/pgtable-3level-hwdef.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_3LEVEL_HWDEF_H
-#define __ASM_PGTABLE_3LEVEL_HWDEF_H
-
-/*
- * With LPAE and 4KB pages, there are 3 levels of page tables. Each level has
- * 512 entries of 8 bytes each, occupying a 4K page. The first level table
- * covers a range of 512GB, each entry representing 1GB. The user and kernel
- * address spaces are limited to 512GB each.
- */
-#define PTRS_PER_PTE		512
-#define PTRS_PER_PMD		512
-#define PTRS_PER_PGD		512
-
-/*
- * PGDIR_SHIFT determines the size a top-level page table entry can map.
- */
-#define PGDIR_SHIFT		30
-#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
-
-/*
- * PMD_SHIFT determines the size a middle-level page table entry can map.
- */
-#define PMD_SHIFT		21
-#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
-#define PMD_MASK		(~(PMD_SIZE-1))
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT		21
-#define SECTION_SIZE		(_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK		(~(SECTION_SIZE-1))
-
-#endif
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
deleted file mode 100644
index 4e94424..0000000
--- a/arch/arm64/include/asm/pgtable-3level-types.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
-#define __ASM_PGTABLE_3LEVEL_TYPES_H
-
-#include <asm/types.h>
-
-typedef u64 pteval_t;
-typedef u64 pmdval_t;
-typedef u64 pgdval_t;
-
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { pteval_t pte; } pte_t;
-typedef struct { pmdval_t pmd; } pmd_t;
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pteval_t pgprot; } pgprot_t;
-
-#define pte_val(x)      ((x).pte)
-#define pmd_val(x)      ((x).pmd)
-#define pgd_val(x)	((x).pgd)
-#define pgprot_val(x)   ((x).pgprot)
-
-#define __pte(x)        ((pte_t) { (x) } )
-#define __pmd(x)        ((pmd_t) { (x) } )
-#define __pgd(x)	((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
-
-#else	/* !STRICT_MM_TYPECHECKS */
-
-typedef pteval_t pte_t;
-typedef pmdval_t pmd_t;
-typedef pgdval_t pgd_t;
-typedef pteval_t pgprot_t;
-
-#define pte_val(x)	(x)
-#define pmd_val(x)	(x)
-#define pgd_val(x)	(x)
-#define pgprot_val(x)	(x)
-
-#define __pte(x)	(x)
-#define __pmd(x)	(x)
-#define __pgd(x)	(x)
-#define __pgprot(x)	(x)
-
-#endif	/* STRICT_MM_TYPECHECKS */
-
-#include <asm-generic/pgtable-nopud.h>
-
-#endif	/* __ASM_PGTABLE_3LEVEL_TYPES_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 955e8c5..88174e0 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -16,18 +16,50 @@
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
-#ifdef CONFIG_ARM64_64K_PAGES
-#include <asm/pgtable-2level-hwdef.h>
-#else
-#include <asm/pgtable-3level-hwdef.h>
+#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - 3))
+
+/*
+ * PMD_SHIFT determines the size a level 2 page table entry can map.
+ */
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#define PMD_SHIFT		((PAGE_SHIFT - 3) * 2 + 3)
+#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK		(~(PMD_SIZE-1))
+#define PTRS_PER_PMD		PTRS_PER_PTE
 #endif
 
 /*
+ * PUD_SHIFT determines the size a level 1 page table entry can map.
+ */
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#define PUD_SHIFT		((PAGE_SHIFT - 3) * 3 + 3)
+#define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK		(~(PUD_SIZE-1))
+#define PTRS_PER_PUD		PTRS_PER_PTE
+#endif
+
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map
+ * (depending on the configuration, this level can be 0, 1 or 2).
+ */
+#define PGDIR_SHIFT		((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3)
+#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
+
+/*
+ * Section address mask and size definitions.
+ */
+#define SECTION_SHIFT		PMD_SHIFT
+#define SECTION_SIZE		(_AC(1, UL) << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+
+/*
  * Hardware page table definitions.
  *
  * Level 1 descriptor (PUD).
  */
-
+#define PUD_TYPE_TABLE		(_AT(pudval_t, 3) << 0)
 #define PUD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
 #define PUD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
 #define PUD_TYPE_SECT		(_AT(pgdval_t, 1) << 0)
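
The generic formulas reproduce the constants from the deleted fixed-level
headers; evaluating them by hand as a sanity check:

	4K pages (PAGE_SHIFT = 12), 3 levels, VA_BITS = 39:
	  PMD_SHIFT    = (12 - 3) * 2 + 3 = 21   (2MB sections, as before)
	  PGDIR_SHIFT  = (12 - 3) * 3 + 3 = 30   (1GB per pgd entry)
	  PTRS_PER_PTE = 1 << 9 = 512, PTRS_PER_PGD = 1 << (39 - 30) = 512

	64K pages (PAGE_SHIFT = 16), 2 levels, VA_BITS = 42:
	  PGDIR_SHIFT  = (16 - 3) * 2 + 3 = 29   (512MB per pgd entry)
	  PTRS_PER_PTE = 1 << 13 = 8192, PTRS_PER_PGD = 1 << (42 - 29) = 8192
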
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
new file mode 100644
index 0000000..ca9df80
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -0,0 +1,95 @@
+/*
+ * Page table type definitions.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_PGTABLE_TYPES_H
+#define __ASM_PGTABLE_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to take advantage of C type-checking.
+ */
+typedef struct { pteval_t pte; } pte_t;
+#define pte_val(x)	((x).pte)
+#define __pte(x)	((pte_t) { (x) } )
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+typedef struct { pmdval_t pmd; } pmd_t;
+#define pmd_val(x)	((x).pmd)
+#define __pmd(x)	((pmd_t) { (x) } )
+#endif
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+typedef struct { pudval_t pud; } pud_t;
+#define pud_val(x)	((x).pud)
+#define __pud(x)	((pud_t) { (x) } )
+#endif
+
+typedef struct { pgdval_t pgd; } pgd_t;
+#define pgd_val(x)	((x).pgd)
+#define __pgd(x)	((pgd_t) { (x) } )
+
+typedef struct { pteval_t pgprot; } pgprot_t;
+#define pgprot_val(x)	((x).pgprot)
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else	/* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+#define pte_val(x)	(x)
+#define __pte(x)	(x)
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+typedef pmdval_t pmd_t;
+#define pmd_val(x)	(x)
+#define __pmd(x)	(x)
+#endif
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+typedef pudval_t pud_t;
+#define pud_val(x)	(x)
+#define __pud(x)	(x)
+#endif
+
+typedef pgdval_t pgd_t;
+#define pgd_val(x)	(x)
+#define __pgd(x)	(x)
+
+typedef pteval_t pgprot_t;
+#define pgprot_val(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#if CONFIG_ARM64_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#elif CONFIG_ARM64_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#endif
+
+#endif	/* __ASM_PGTABLE_TYPES_H */
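
The STRICT_MM_TYPECHECKS variant wraps each level's value in a distinct
struct so that mixing levels becomes a compile-time error; a minimal
sketch:

	pte_t pte = __pte(0);
	pmd_t pmd = pte;			/* compile error: incompatible types */
	pmd_t ok  = __pmd(pte_val(pte));	/* explicit conversion compiles      */
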
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e0ccceb..ffe1ba0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -33,9 +33,16 @@
 
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+ *
+ * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ *	(rounded up to PUD_SIZE).
+ * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
+ *	fixed mappings and modules
  */
+#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 #define VMALLOC_START		(UL(0xffffffffffffffff) << VA_BITS)
-#define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)
+#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
 
@@ -44,14 +51,9 @@
 #ifndef __ASSEMBLY__
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
+extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
-#ifndef CONFIG_ARM64_64K_PAGES
-#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
-#endif
-#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-
 #ifdef CONFIG_SMP
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
@@ -112,6 +114,8 @@
 extern struct page *empty_zero_page;
 #define ZERO_PAGE(vaddr)	(empty_zero_page)
 
+#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
+
 #define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
@@ -119,6 +123,10 @@
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
+
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
 #define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
@@ -138,6 +146,8 @@
 
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+#define pte_valid_not_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -184,6 +194,15 @@
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
 	*ptep = pte;
+
+	/*
+	 * Only sync if the new pte is valid and a kernel mapping; otherwise
+	 * TLB maintenance or update_mmu_cache() provide the necessary barriers.
+	 */
+	if (pte_valid_not_user(pte)) {
+		dsb(ishst);
+		isb();
+	}
 }
 
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
@@ -303,6 +322,7 @@
 {
 	*pmdp = pmd;
 	dsb(ishst);
+	isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -323,7 +343,9 @@
  */
 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+
+#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
 
 #define pud_none(pud)		(!pud_val(pud))
 #define pud_bad(pud)		(!(pud_val(pud) & 2))
@@ -333,6 +355,7 @@
 {
 	*pudp = pud;
 	dsb(ishst);
+	isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -345,7 +368,51 @@
 	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
-#endif	/* CONFIG_ARM64_64K_PAGES */
+/* Find an entry in the second-level page table. */
+#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+
+#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))
+
+#define pgd_none(pgd)		(!pgd_val(pgd))
+#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
+#define pgd_present(pgd)	(pgd_val(pgd))
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	*pgdp = pgd;
+	dsb(ishst);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	set_pgd(pgdp, __pgd(0));
+}
+
+static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+{
+	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+/* Find an entry in the first-level page table. */
+#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
+{
+	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
+}
+
+#endif  /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+
+#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -355,18 +422,6 @@
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
-/* Find an entry in the second-level page table.. */
-#ifndef CONFIG_ARM64_64K_PAGES
-#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -383,9 +438,6 @@
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
 /*
  * Encode and decode a swap entry:
  *	bits 0-1:	present (must be zero)
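
A worked example of the new vmemmap sizing (assuming a 64-byte struct
page; the real size is configuration-dependent):

	VA_BITS = 39, PAGE_SHIFT = 12:
	  VMEMMAP_SIZE = ALIGN(2^27 pages * 64 bytes, PUD_SIZE) = 8GB
	  VMALLOC_END  = PAGE_OFFSET - PUD_SIZE - 8GB - 64K

so the struct page array always fits below the linear map whatever
VA_BITS is, instead of relying on the old fixed 16GB hole.
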
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 34de2a8..3df21fe 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -129,6 +129,7 @@
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()			barrier()
+#define cpu_relax_lowlatency()                cpu_relax()
 
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
@@ -137,8 +138,8 @@
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
-#define KSTK_EIP(tsk)	task_pt_regs(tsk)->pc
-#define KSTK_ESP(tsk)	task_pt_regs(tsk)->sp
+#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)	((unsigned long)task_pt_regs(tsk)->sp)
 
 /*
  * Prefetching support
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
new file mode 100644
index 0000000..fe5e287
--- /dev/null
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function.  The pattern is called the stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on ARM.  This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef __ASM_STACKPROTECTOR_H
+#define __ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	unsigned long canary;
+
+	/* Try to get a semi random initial value. */
+	get_random_bytes(&canary, sizeof(canary));
+	canary ^= LINUX_VERSION_CODE;
+
+	current->stack_canary = canary;
+	__stack_chk_guard = current->stack_canary;
+}
+
+#endif	/* __ASM_STACKPROTECTOR_H */
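
For reference, a C-level sketch of what the -fstack-protector
instrumentation amounts to (purely illustrative; GCC emits the
equivalent checks itself):

	unsigned long __stack_chk_guard;  /* set from boot_init_stack_canary() */

	void instrumented(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue */

		/* ... function body: an overflow of a local buffer would
		 * clobber 'canary' before reaching the return address ... */

		if (canary != __stack_chk_guard)		/* epilogue */
			__stack_chk_fail();			/* never returns */
	}
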
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 383771e..709a574 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_SYSCALL_H
 #define __ASM_SYSCALL_H
 
+#include <uapi/linux/audit.h>
+#include <linux/compat.h>
 #include <linux/err.h>
 
 extern const void *sys_call_table[];
@@ -105,4 +107,16 @@
 	memcpy(&regs->regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * AArch64 has the same system calls on both little- and big-endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_ARM;
+
+	return AUDIT_ARCH_AARCH64;
+}
+
 #endif	/* __ASM_SYSCALL_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
new file mode 100644
index 0000000..5c89df0
--- /dev/null
+++ b/arch/arm64/include/asm/sysreg.h
@@ -0,0 +1,60 @@
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#define sys_reg(op0, op1, crn, crm, op2) \
+	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#ifdef __ASSEMBLY__
+
+	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+	.equ	__reg_num_x\num, \num
+	.endr
+	.equ	__reg_num_xzr, 31
+
+	.macro	mrs_s, rt, sreg
+	.inst	0xd5300000|(\sreg)|(__reg_num_\rt)
+	.endm
+
+	.macro	msr_s, sreg, rt
+	.inst	0xd5100000|(\sreg)|(__reg_num_\rt)
+	.endm
+
+#else
+
+asm(
+"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
+"	.equ	__reg_num_x\\num, \\num\n"
+"	.endr\n"
+"	.equ	__reg_num_xzr, 31\n"
+"\n"
+"	.macro	mrs_s, rt, sreg\n"
+"	.inst	0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.endm\n"
+"\n"
+"	.macro	msr_s, sreg, rt\n"
+"	.inst	0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.endm\n"
+);
+
+#endif
+
+#endif	/* __ASM_SYSREG_H */
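
A worked encoding, taking ID_AA64MMFR0_EL1 (op0=3, op1=0, CRn=0, CRm=7,
op2=0) as the example register:

	sys_reg(3, 0, 0, 7, 0) = ((3 - 2) << 19) | (7 << 8) = 0x80700

	mrs_s x0, sys_reg(3, 0, 0, 7, 0)
	  -> .inst 0xd5300000 | 0x80700 | 0 = 0xd5380700

which is exactly what a new-enough assembler emits for
"mrs x0, id_aa64mmfr0_el1".
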
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e40b6d0..45108d8 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -103,6 +103,7 @@
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
+#define TIF_NOHZ		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
@@ -118,6 +119,7 @@
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_NOHZ		(1 << TIF_NOHZ)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
@@ -128,7 +130,8 @@
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+				 _TIF_NOHZ)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 80e2c08..62731ef 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -91,7 +91,7 @@
 	tlb_remove_page(tlb, pte);
 }
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
@@ -100,6 +100,15 @@
 }
 #endif
 
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
+				  unsigned long addr)
+{
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, virt_to_page(pudp));
+}
+#endif
+
 static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
 						unsigned long address)
 {
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index b9349c4..73f0ce5 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -98,8 +98,8 @@
 	dsb(ish);
 }
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-					unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
@@ -112,7 +112,7 @@
 	dsb(ish);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 	start >>= 12;
@@ -122,6 +122,30 @@
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
 		asm("tlbi vaae1is, %0" : : "r"(addr));
 	dsb(ish);
+	isb();
+}
+
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges, not
+ * necessarily to improve performance.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	if ((end - start) <= MAX_TLB_RANGE)
+		__flush_tlb_range(vma, start, end);
+	else
+		flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if ((end - start) <= MAX_TLB_RANGE)
+		__flush_tlb_kernel_range(start, end);
+	else
+		flush_tlb_all();
 }
 
 /*
@@ -131,8 +155,8 @@
 				    unsigned long addr, pte_t *ptep)
 {
 	/*
-	 * set_pte() does not have a DSB, so make sure that the page table
-	 * write is visible.
+	 * set_pte() does not have a DSB for user mappings, so make sure that
+	 * the page table write is visible.
 	 */
 	dsb(ishst);
 }
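
The cut-over point is fixed in pages, so the byte span scales with the
page size:

	MAX_TLB_RANGE = 1024UL << PAGE_SHIFT
	  4K pages:  1024 * 4K  =  4MB
	  64K pages: 1024 * 64K = 64MB

Anything larger takes a single full-MM (or full-TLB) invalidation rather
than one tlbi per page.
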
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e5f47df..4bc95d2 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -26,7 +26,24 @@
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+
+/*
+ * Compat syscall numbers used by the AArch64 kernel.
+ */
+#define __NR_compat_restart_syscall	0
+#define __NR_compat_sigreturn		119
+#define __NR_compat_rt_sigreturn	173
+
+/*
+ * The following SVCs are ARM private.
+ */
+#define __ARM_NR_COMPAT_BASE		0x0f0000
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
+#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+
+#define __NR_compat_syscalls		383
 #endif
+
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
 
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index c8d8fc1..e242600 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -21,403 +21,769 @@
 #define __SYSCALL(x, y)
 #endif
 
-__SYSCALL(0,   sys_restart_syscall)
-__SYSCALL(1,   sys_exit)
-__SYSCALL(2,   sys_fork)
-__SYSCALL(3,   sys_read)
-__SYSCALL(4,   sys_write)
-__SYSCALL(5,   compat_sys_open)
-__SYSCALL(6,   sys_close)
-__SYSCALL(7,   sys_ni_syscall)			/* 7 was sys_waitpid */
-__SYSCALL(8,   sys_creat)
-__SYSCALL(9,   sys_link)
-__SYSCALL(10,  sys_unlink)
-__SYSCALL(11,  compat_sys_execve)
-__SYSCALL(12,  sys_chdir)
-__SYSCALL(13,  sys_ni_syscall)			/* 13 was sys_time */
-__SYSCALL(14,  sys_mknod)
-__SYSCALL(15,  sys_chmod)
-__SYSCALL(16,  sys_lchown16)
-__SYSCALL(17,  sys_ni_syscall)			/* 17 was sys_break */
-__SYSCALL(18,  sys_ni_syscall)			/* 18 was sys_stat */
-__SYSCALL(19,  compat_sys_lseek)
-__SYSCALL(20,  sys_getpid)
-__SYSCALL(21,  compat_sys_mount)
-__SYSCALL(22,  sys_ni_syscall)			/* 22 was sys_umount */
-__SYSCALL(23,  sys_setuid16)
-__SYSCALL(24,  sys_getuid16)
-__SYSCALL(25,  sys_ni_syscall)			/* 25 was sys_stime */
-__SYSCALL(26,  compat_sys_ptrace)
-__SYSCALL(27,  sys_ni_syscall)			/* 27 was sys_alarm */
-__SYSCALL(28,  sys_ni_syscall)			/* 28 was sys_fstat */
-__SYSCALL(29,  sys_pause)
-__SYSCALL(30,  sys_ni_syscall)			/* 30 was sys_utime */
-__SYSCALL(31,  sys_ni_syscall)			/* 31 was sys_stty */
-__SYSCALL(32,  sys_ni_syscall)			/* 32 was sys_gtty */
-__SYSCALL(33,  sys_access)
-__SYSCALL(34,  sys_nice)
-__SYSCALL(35,  sys_ni_syscall)			/* 35 was sys_ftime */
-__SYSCALL(36,  sys_sync)
-__SYSCALL(37,  sys_kill)
-__SYSCALL(38,  sys_rename)
-__SYSCALL(39,  sys_mkdir)
-__SYSCALL(40,  sys_rmdir)
-__SYSCALL(41,  sys_dup)
-__SYSCALL(42,  sys_pipe)
-__SYSCALL(43,  compat_sys_times)
-__SYSCALL(44,  sys_ni_syscall)			/* 44 was sys_prof */
-__SYSCALL(45,  sys_brk)
-__SYSCALL(46,  sys_setgid16)
-__SYSCALL(47,  sys_getgid16)
-__SYSCALL(48,  sys_ni_syscall)			/* 48 was sys_signal */
-__SYSCALL(49,  sys_geteuid16)
-__SYSCALL(50,  sys_getegid16)
-__SYSCALL(51,  sys_acct)
-__SYSCALL(52,  sys_umount)
-__SYSCALL(53,  sys_ni_syscall)			/* 53 was sys_lock */
-__SYSCALL(54,  compat_sys_ioctl)
-__SYSCALL(55,  compat_sys_fcntl)
-__SYSCALL(56,  sys_ni_syscall)			/* 56 was sys_mpx */
-__SYSCALL(57,  sys_setpgid)
-__SYSCALL(58,  sys_ni_syscall)			/* 58 was sys_ulimit */
-__SYSCALL(59,  sys_ni_syscall)			/* 59 was sys_olduname */
-__SYSCALL(60,  sys_umask)
-__SYSCALL(61,  sys_chroot)
-__SYSCALL(62,  compat_sys_ustat)
-__SYSCALL(63,  sys_dup2)
-__SYSCALL(64,  sys_getppid)
-__SYSCALL(65,  sys_getpgrp)
-__SYSCALL(66,  sys_setsid)
-__SYSCALL(67,  compat_sys_sigaction)
-__SYSCALL(68,  sys_ni_syscall)			/* 68 was sys_sgetmask */
-__SYSCALL(69,  sys_ni_syscall)			/* 69 was sys_ssetmask */
-__SYSCALL(70,  sys_setreuid16)
-__SYSCALL(71,  sys_setregid16)
-__SYSCALL(72,  sys_sigsuspend)
-__SYSCALL(73,  compat_sys_sigpending)
-__SYSCALL(74,  sys_sethostname)
-__SYSCALL(75,  compat_sys_setrlimit)
-__SYSCALL(76,  sys_ni_syscall)			/* 76 was compat_sys_getrlimit */
-__SYSCALL(77,  compat_sys_getrusage)
-__SYSCALL(78,  compat_sys_gettimeofday)
-__SYSCALL(79,  compat_sys_settimeofday)
-__SYSCALL(80,  sys_getgroups16)
-__SYSCALL(81,  sys_setgroups16)
-__SYSCALL(82,  sys_ni_syscall)			/* 82 was compat_sys_select */
-__SYSCALL(83,  sys_symlink)
-__SYSCALL(84,  sys_ni_syscall)			/* 84 was sys_lstat */
-__SYSCALL(85,  sys_readlink)
-__SYSCALL(86,  sys_uselib)
-__SYSCALL(87,  sys_swapon)
-__SYSCALL(88,  sys_reboot)
-__SYSCALL(89,  sys_ni_syscall)			/* 89 was sys_readdir */
-__SYSCALL(90,  sys_ni_syscall)			/* 90 was sys_mmap */
-__SYSCALL(91,  sys_munmap)
-__SYSCALL(92,  compat_sys_truncate)
-__SYSCALL(93,  compat_sys_ftruncate)
-__SYSCALL(94,  sys_fchmod)
-__SYSCALL(95,  sys_fchown16)
-__SYSCALL(96,  sys_getpriority)
-__SYSCALL(97,  sys_setpriority)
-__SYSCALL(98,  sys_ni_syscall)			/* 98 was sys_profil */
-__SYSCALL(99,  compat_sys_statfs)
-__SYSCALL(100, compat_sys_fstatfs)
-__SYSCALL(101, sys_ni_syscall)			/* 101 was sys_ioperm */
-__SYSCALL(102, sys_ni_syscall)			/* 102 was sys_socketcall */
-__SYSCALL(103, sys_syslog)
-__SYSCALL(104, compat_sys_setitimer)
-__SYSCALL(105, compat_sys_getitimer)
-__SYSCALL(106, compat_sys_newstat)
-__SYSCALL(107, compat_sys_newlstat)
-__SYSCALL(108, compat_sys_newfstat)
-__SYSCALL(109, sys_ni_syscall)			/* 109 was sys_uname */
-__SYSCALL(110, sys_ni_syscall)			/* 110 was sys_iopl */
-__SYSCALL(111, sys_vhangup)
-__SYSCALL(112, sys_ni_syscall)			/* 112 was sys_idle */
-__SYSCALL(113, sys_ni_syscall)			/* 113 was sys_syscall */
-__SYSCALL(114, compat_sys_wait4)
-__SYSCALL(115, sys_swapoff)
-__SYSCALL(116, compat_sys_sysinfo)
-__SYSCALL(117, sys_ni_syscall)			/* 117 was sys_ipc */
-__SYSCALL(118, sys_fsync)
-__SYSCALL(119, compat_sys_sigreturn_wrapper)
-__SYSCALL(120, sys_clone)
-__SYSCALL(121, sys_setdomainname)
-__SYSCALL(122, sys_newuname)
-__SYSCALL(123, sys_ni_syscall)			/* 123 was sys_modify_ldt */
-__SYSCALL(124, compat_sys_adjtimex)
-__SYSCALL(125, sys_mprotect)
-__SYSCALL(126, compat_sys_sigprocmask)
-__SYSCALL(127, sys_ni_syscall)			/* 127 was sys_create_module */
-__SYSCALL(128, sys_init_module)
-__SYSCALL(129, sys_delete_module)
-__SYSCALL(130, sys_ni_syscall)			/* 130 was sys_get_kernel_syms */
-__SYSCALL(131, sys_quotactl)
-__SYSCALL(132, sys_getpgid)
-__SYSCALL(133, sys_fchdir)
-__SYSCALL(134, sys_bdflush)
-__SYSCALL(135, sys_sysfs)
-__SYSCALL(136, sys_personality)
-__SYSCALL(137, sys_ni_syscall)			/* 137 was sys_afs_syscall */
-__SYSCALL(138, sys_setfsuid16)
-__SYSCALL(139, sys_setfsgid16)
-__SYSCALL(140, sys_llseek)
-__SYSCALL(141, compat_sys_getdents)
-__SYSCALL(142, compat_sys_select)
-__SYSCALL(143, sys_flock)
-__SYSCALL(144, sys_msync)
-__SYSCALL(145, compat_sys_readv)
-__SYSCALL(146, compat_sys_writev)
-__SYSCALL(147, sys_getsid)
-__SYSCALL(148, sys_fdatasync)
-__SYSCALL(149, compat_sys_sysctl)
-__SYSCALL(150, sys_mlock)
-__SYSCALL(151, sys_munlock)
-__SYSCALL(152, sys_mlockall)
-__SYSCALL(153, sys_munlockall)
-__SYSCALL(154, sys_sched_setparam)
-__SYSCALL(155, sys_sched_getparam)
-__SYSCALL(156, sys_sched_setscheduler)
-__SYSCALL(157, sys_sched_getscheduler)
-__SYSCALL(158, sys_sched_yield)
-__SYSCALL(159, sys_sched_get_priority_max)
-__SYSCALL(160, sys_sched_get_priority_min)
-__SYSCALL(161, compat_sys_sched_rr_get_interval)
-__SYSCALL(162, compat_sys_nanosleep)
-__SYSCALL(163, sys_mremap)
-__SYSCALL(164, sys_setresuid16)
-__SYSCALL(165, sys_getresuid16)
-__SYSCALL(166, sys_ni_syscall)			/* 166 was sys_vm86 */
-__SYSCALL(167, sys_ni_syscall)			/* 167 was sys_query_module */
-__SYSCALL(168, sys_poll)
-__SYSCALL(169, sys_ni_syscall)
-__SYSCALL(170, sys_setresgid16)
-__SYSCALL(171, sys_getresgid16)
-__SYSCALL(172, sys_prctl)
-__SYSCALL(173, compat_sys_rt_sigreturn_wrapper)
-__SYSCALL(174, compat_sys_rt_sigaction)
-__SYSCALL(175, compat_sys_rt_sigprocmask)
-__SYSCALL(176, compat_sys_rt_sigpending)
-__SYSCALL(177, compat_sys_rt_sigtimedwait)
-__SYSCALL(178, compat_sys_rt_sigqueueinfo)
-__SYSCALL(179, compat_sys_rt_sigsuspend)
-__SYSCALL(180, compat_sys_pread64_wrapper)
-__SYSCALL(181, compat_sys_pwrite64_wrapper)
-__SYSCALL(182, sys_chown16)
-__SYSCALL(183, sys_getcwd)
-__SYSCALL(184, sys_capget)
-__SYSCALL(185, sys_capset)
-__SYSCALL(186, compat_sys_sigaltstack)
-__SYSCALL(187, compat_sys_sendfile)
-__SYSCALL(188, sys_ni_syscall)			/* 188 reserved */
-__SYSCALL(189, sys_ni_syscall)			/* 189 reserved */
-__SYSCALL(190, sys_vfork)
-__SYSCALL(191, compat_sys_getrlimit)		/* SuS compliant getrlimit */
-__SYSCALL(192, sys_mmap_pgoff)
-__SYSCALL(193, compat_sys_truncate64_wrapper)
-__SYSCALL(194, compat_sys_ftruncate64_wrapper)
-__SYSCALL(195, sys_stat64)
-__SYSCALL(196, sys_lstat64)
-__SYSCALL(197, sys_fstat64)
-__SYSCALL(198, sys_lchown)
-__SYSCALL(199, sys_getuid)
-__SYSCALL(200, sys_getgid)
-__SYSCALL(201, sys_geteuid)
-__SYSCALL(202, sys_getegid)
-__SYSCALL(203, sys_setreuid)
-__SYSCALL(204, sys_setregid)
-__SYSCALL(205, sys_getgroups)
-__SYSCALL(206, sys_setgroups)
-__SYSCALL(207, sys_fchown)
-__SYSCALL(208, sys_setresuid)
-__SYSCALL(209, sys_getresuid)
-__SYSCALL(210, sys_setresgid)
-__SYSCALL(211, sys_getresgid)
-__SYSCALL(212, sys_chown)
-__SYSCALL(213, sys_setuid)
-__SYSCALL(214, sys_setgid)
-__SYSCALL(215, sys_setfsuid)
-__SYSCALL(216, sys_setfsgid)
-__SYSCALL(217, compat_sys_getdents64)
-__SYSCALL(218, sys_pivot_root)
-__SYSCALL(219, sys_mincore)
-__SYSCALL(220, sys_madvise)
-__SYSCALL(221, compat_sys_fcntl64)
-__SYSCALL(222, sys_ni_syscall)			/* 222 for tux */
-__SYSCALL(223, sys_ni_syscall)			/* 223 is unused */
-__SYSCALL(224, sys_gettid)
-__SYSCALL(225, compat_sys_readahead_wrapper)
-__SYSCALL(226, sys_setxattr)
-__SYSCALL(227, sys_lsetxattr)
-__SYSCALL(228, sys_fsetxattr)
-__SYSCALL(229, sys_getxattr)
-__SYSCALL(230, sys_lgetxattr)
-__SYSCALL(231, sys_fgetxattr)
-__SYSCALL(232, sys_listxattr)
-__SYSCALL(233, sys_llistxattr)
-__SYSCALL(234, sys_flistxattr)
-__SYSCALL(235, sys_removexattr)
-__SYSCALL(236, sys_lremovexattr)
-__SYSCALL(237, sys_fremovexattr)
-__SYSCALL(238, sys_tkill)
-__SYSCALL(239, sys_sendfile64)
-__SYSCALL(240, compat_sys_futex)
-__SYSCALL(241, compat_sys_sched_setaffinity)
-__SYSCALL(242, compat_sys_sched_getaffinity)
-__SYSCALL(243, compat_sys_io_setup)
-__SYSCALL(244, sys_io_destroy)
-__SYSCALL(245, compat_sys_io_getevents)
-__SYSCALL(246, compat_sys_io_submit)
-__SYSCALL(247, sys_io_cancel)
-__SYSCALL(248, sys_exit_group)
-__SYSCALL(249, compat_sys_lookup_dcookie)
-__SYSCALL(250, sys_epoll_create)
-__SYSCALL(251, sys_epoll_ctl)
-__SYSCALL(252, sys_epoll_wait)
-__SYSCALL(253, sys_remap_file_pages)
-__SYSCALL(254, sys_ni_syscall)			/* 254 for set_thread_area */
-__SYSCALL(255, sys_ni_syscall)			/* 255 for get_thread_area */
-__SYSCALL(256, sys_set_tid_address)
-__SYSCALL(257, compat_sys_timer_create)
-__SYSCALL(258, compat_sys_timer_settime)
-__SYSCALL(259, compat_sys_timer_gettime)
-__SYSCALL(260, sys_timer_getoverrun)
-__SYSCALL(261, sys_timer_delete)
-__SYSCALL(262, compat_sys_clock_settime)
-__SYSCALL(263, compat_sys_clock_gettime)
-__SYSCALL(264, compat_sys_clock_getres)
-__SYSCALL(265, compat_sys_clock_nanosleep)
-__SYSCALL(266, compat_sys_statfs64_wrapper)
-__SYSCALL(267, compat_sys_fstatfs64_wrapper)
-__SYSCALL(268, sys_tgkill)
-__SYSCALL(269, compat_sys_utimes)
-__SYSCALL(270, compat_sys_fadvise64_64_wrapper)
-__SYSCALL(271, sys_pciconfig_iobase)
-__SYSCALL(272, sys_pciconfig_read)
-__SYSCALL(273, sys_pciconfig_write)
-__SYSCALL(274, compat_sys_mq_open)
-__SYSCALL(275, sys_mq_unlink)
-__SYSCALL(276, compat_sys_mq_timedsend)
-__SYSCALL(277, compat_sys_mq_timedreceive)
-__SYSCALL(278, compat_sys_mq_notify)
-__SYSCALL(279, compat_sys_mq_getsetattr)
-__SYSCALL(280, compat_sys_waitid)
-__SYSCALL(281, sys_socket)
-__SYSCALL(282, sys_bind)
-__SYSCALL(283, sys_connect)
-__SYSCALL(284, sys_listen)
-__SYSCALL(285, sys_accept)
-__SYSCALL(286, sys_getsockname)
-__SYSCALL(287, sys_getpeername)
-__SYSCALL(288, sys_socketpair)
-__SYSCALL(289, sys_send)
-__SYSCALL(290, sys_sendto)
-__SYSCALL(291, compat_sys_recv)
-__SYSCALL(292, compat_sys_recvfrom)
-__SYSCALL(293, sys_shutdown)
-__SYSCALL(294, compat_sys_setsockopt)
-__SYSCALL(295, compat_sys_getsockopt)
-__SYSCALL(296, compat_sys_sendmsg)
-__SYSCALL(297, compat_sys_recvmsg)
-__SYSCALL(298, sys_semop)
-__SYSCALL(299, sys_semget)
-__SYSCALL(300, compat_sys_semctl)
-__SYSCALL(301, compat_sys_msgsnd)
-__SYSCALL(302, compat_sys_msgrcv)
-__SYSCALL(303, sys_msgget)
-__SYSCALL(304, compat_sys_msgctl)
-__SYSCALL(305, compat_sys_shmat)
-__SYSCALL(306, sys_shmdt)
-__SYSCALL(307, sys_shmget)
-__SYSCALL(308, compat_sys_shmctl)
-__SYSCALL(309, sys_add_key)
-__SYSCALL(310, sys_request_key)
-__SYSCALL(311, compat_sys_keyctl)
-__SYSCALL(312, compat_sys_semtimedop)
-__SYSCALL(313, sys_ni_syscall)
-__SYSCALL(314, sys_ioprio_set)
-__SYSCALL(315, sys_ioprio_get)
-__SYSCALL(316, sys_inotify_init)
-__SYSCALL(317, sys_inotify_add_watch)
-__SYSCALL(318, sys_inotify_rm_watch)
-__SYSCALL(319, compat_sys_mbind)
-__SYSCALL(320, compat_sys_get_mempolicy)
-__SYSCALL(321, compat_sys_set_mempolicy)
-__SYSCALL(322, compat_sys_openat)
-__SYSCALL(323, sys_mkdirat)
-__SYSCALL(324, sys_mknodat)
-__SYSCALL(325, sys_fchownat)
-__SYSCALL(326, compat_sys_futimesat)
-__SYSCALL(327, sys_fstatat64)
-__SYSCALL(328, sys_unlinkat)
-__SYSCALL(329, sys_renameat)
-__SYSCALL(330, sys_linkat)
-__SYSCALL(331, sys_symlinkat)
-__SYSCALL(332, sys_readlinkat)
-__SYSCALL(333, sys_fchmodat)
-__SYSCALL(334, sys_faccessat)
-__SYSCALL(335, compat_sys_pselect6)
-__SYSCALL(336, compat_sys_ppoll)
-__SYSCALL(337, sys_unshare)
-__SYSCALL(338, compat_sys_set_robust_list)
-__SYSCALL(339, compat_sys_get_robust_list)
-__SYSCALL(340, sys_splice)
-__SYSCALL(341, compat_sys_sync_file_range2_wrapper)
-__SYSCALL(342, sys_tee)
-__SYSCALL(343, compat_sys_vmsplice)
-__SYSCALL(344, compat_sys_move_pages)
-__SYSCALL(345, sys_getcpu)
-__SYSCALL(346, compat_sys_epoll_pwait)
-__SYSCALL(347, compat_sys_kexec_load)
-__SYSCALL(348, compat_sys_utimensat)
-__SYSCALL(349, compat_sys_signalfd)
-__SYSCALL(350, sys_timerfd_create)
-__SYSCALL(351, sys_eventfd)
-__SYSCALL(352, compat_sys_fallocate_wrapper)
-__SYSCALL(353, compat_sys_timerfd_settime)
-__SYSCALL(354, compat_sys_timerfd_gettime)
-__SYSCALL(355, compat_sys_signalfd4)
-__SYSCALL(356, sys_eventfd2)
-__SYSCALL(357, sys_epoll_create1)
-__SYSCALL(358, sys_dup3)
-__SYSCALL(359, sys_pipe2)
-__SYSCALL(360, sys_inotify_init1)
-__SYSCALL(361, compat_sys_preadv)
-__SYSCALL(362, compat_sys_pwritev)
-__SYSCALL(363, compat_sys_rt_tgsigqueueinfo)
-__SYSCALL(364, sys_perf_event_open)
-__SYSCALL(365, compat_sys_recvmmsg)
-__SYSCALL(366, sys_accept4)
-__SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark)
-__SYSCALL(369, sys_prlimit64)
-__SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, compat_sys_open_by_handle_at)
-__SYSCALL(372, compat_sys_clock_adjtime)
-__SYSCALL(373, sys_syncfs)
-__SYSCALL(374, compat_sys_sendmmsg)
-__SYSCALL(375, sys_setns)
-__SYSCALL(376, compat_sys_process_vm_readv)
-__SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_kcmp)
-__SYSCALL(379, sys_finit_module)
-__SYSCALL(380, sys_sched_setattr)
-__SYSCALL(381, sys_sched_getattr)
-__SYSCALL(382, sys_renameat2)
-
-#define __NR_compat_syscalls		383
-
-/*
- * Compat syscall numbers used by the AArch64 kernel.
- */
-#define __NR_compat_restart_syscall	0
-#define __NR_compat_sigreturn		119
-#define __NR_compat_rt_sigreturn	173
-
-
-/*
- * The following SVCs are ARM private.
- */
-#define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+#define __NR_restart_syscall 0
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_exit 1
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_fork 2
+__SYSCALL(__NR_fork, sys_fork)
+#define __NR_read 3
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 4
+__SYSCALL(__NR_write, sys_write)
+#define __NR_open 5
+__SYSCALL(__NR_open, compat_sys_open)
+#define __NR_close 6
+__SYSCALL(__NR_close, sys_close)
+			/* 7 was sys_waitpid */
+__SYSCALL(7, sys_ni_syscall)
+#define __NR_creat 8
+__SYSCALL(__NR_creat, sys_creat)
+#define __NR_link 9
+__SYSCALL(__NR_link, sys_link)
+#define __NR_unlink 10
+__SYSCALL(__NR_unlink, sys_unlink)
+#define __NR_execve 11
+__SYSCALL(__NR_execve, compat_sys_execve)
+#define __NR_chdir 12
+__SYSCALL(__NR_chdir, sys_chdir)
+			/* 13 was sys_time */
+__SYSCALL(13, sys_ni_syscall)
+#define __NR_mknod 14
+__SYSCALL(__NR_mknod, sys_mknod)
+#define __NR_chmod 15
+__SYSCALL(__NR_chmod, sys_chmod)
+#define __NR_lchown 16
+__SYSCALL(__NR_lchown, sys_lchown16)
+			/* 17 was sys_break */
+__SYSCALL(17, sys_ni_syscall)
+			/* 18 was sys_stat */
+__SYSCALL(18, sys_ni_syscall)
+#define __NR_lseek 19
+__SYSCALL(__NR_lseek, compat_sys_lseek)
+#define __NR_getpid 20
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_mount 21
+__SYSCALL(__NR_mount, compat_sys_mount)
+			/* 22 was sys_umount */
+__SYSCALL(22, sys_ni_syscall)
+#define __NR_setuid 23
+__SYSCALL(__NR_setuid, sys_setuid16)
+#define __NR_getuid 24
+__SYSCALL(__NR_getuid, sys_getuid16)
+			/* 25 was sys_stime */
+__SYSCALL(25, sys_ni_syscall)
+#define __NR_ptrace 26
+__SYSCALL(__NR_ptrace, compat_sys_ptrace)
+			/* 27 was sys_alarm */
+__SYSCALL(27, sys_ni_syscall)
+			/* 28 was sys_fstat */
+__SYSCALL(28, sys_ni_syscall)
+#define __NR_pause 29
+__SYSCALL(__NR_pause, sys_pause)
+			/* 30 was sys_utime */
+__SYSCALL(30, sys_ni_syscall)
+			/* 31 was sys_stty */
+__SYSCALL(31, sys_ni_syscall)
+			/* 32 was sys_gtty */
+__SYSCALL(32, sys_ni_syscall)
+#define __NR_access 33
+__SYSCALL(__NR_access, sys_access)
+#define __NR_nice 34
+__SYSCALL(__NR_nice, sys_nice)
+			/* 35 was sys_ftime */
+__SYSCALL(35, sys_ni_syscall)
+#define __NR_sync 36
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_kill 37
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_rename 38
+__SYSCALL(__NR_rename, sys_rename)
+#define __NR_mkdir 39
+__SYSCALL(__NR_mkdir, sys_mkdir)
+#define __NR_rmdir 40
+__SYSCALL(__NR_rmdir, sys_rmdir)
+#define __NR_dup 41
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_pipe 42
+__SYSCALL(__NR_pipe, sys_pipe)
+#define __NR_times 43
+__SYSCALL(__NR_times, compat_sys_times)
+			/* 44 was sys_prof */
+__SYSCALL(44, sys_ni_syscall)
+#define __NR_brk 45
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_setgid 46
+__SYSCALL(__NR_setgid, sys_setgid16)
+#define __NR_getgid 47
+__SYSCALL(__NR_getgid, sys_getgid16)
+			/* 48 was sys_signal */
+__SYSCALL(48, sys_ni_syscall)
+#define __NR_geteuid 49
+__SYSCALL(__NR_geteuid, sys_geteuid16)
+#define __NR_getegid 50
+__SYSCALL(__NR_getegid, sys_getegid16)
+#define __NR_acct 51
+__SYSCALL(__NR_acct, sys_acct)
+#define __NR_umount2 52
+__SYSCALL(__NR_umount2, sys_umount)
+			/* 53 was sys_lock */
+__SYSCALL(53, sys_ni_syscall)
+#define __NR_ioctl 54
+__SYSCALL(__NR_ioctl, compat_sys_ioctl)
+#define __NR_fcntl 55
+__SYSCALL(__NR_fcntl, compat_sys_fcntl)
+			/* 56 was sys_mpx */
+__SYSCALL(56, sys_ni_syscall)
+#define __NR_setpgid 57
+__SYSCALL(__NR_setpgid, sys_setpgid)
+			/* 58 was sys_ulimit */
+__SYSCALL(58, sys_ni_syscall)
+			/* 59 was sys_olduname */
+__SYSCALL(59, sys_ni_syscall)
+#define __NR_umask 60
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_chroot 61
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_ustat 62
+__SYSCALL(__NR_ustat, compat_sys_ustat)
+#define __NR_dup2 63
+__SYSCALL(__NR_dup2, sys_dup2)
+#define __NR_getppid 64
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getpgrp 65
+__SYSCALL(__NR_getpgrp, sys_getpgrp)
+#define __NR_setsid 66
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_sigaction 67
+__SYSCALL(__NR_sigaction, compat_sys_sigaction)
+			/* 68 was sys_sgetmask */
+__SYSCALL(68, sys_ni_syscall)
+			/* 69 was sys_ssetmask */
+__SYSCALL(69, sys_ni_syscall)
+#define __NR_setreuid 70
+__SYSCALL(__NR_setreuid, sys_setreuid16)
+#define __NR_setregid 71
+__SYSCALL(__NR_setregid, sys_setregid16)
+#define __NR_sigsuspend 72
+__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
+#define __NR_sigpending 73
+__SYSCALL(__NR_sigpending, compat_sys_sigpending)
+#define __NR_sethostname 74
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setrlimit 75
+__SYSCALL(__NR_setrlimit, compat_sys_setrlimit)
+			/* 76 was compat_sys_getrlimit */
+__SYSCALL(76, sys_ni_syscall)
+#define __NR_getrusage 77
+__SYSCALL(__NR_getrusage, compat_sys_getrusage)
+#define __NR_gettimeofday 78
+__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 79
+__SYSCALL(__NR_settimeofday, compat_sys_settimeofday)
+#define __NR_getgroups 80
+__SYSCALL(__NR_getgroups, sys_getgroups16)
+#define __NR_setgroups 81
+__SYSCALL(__NR_setgroups, sys_setgroups16)
+			/* 82 was compat_sys_select */
+__SYSCALL(82, sys_ni_syscall)
+#define __NR_symlink 83
+__SYSCALL(__NR_symlink, sys_symlink)
+			/* 84 was sys_lstat */
+__SYSCALL(84, sys_ni_syscall)
+#define __NR_readlink 85
+__SYSCALL(__NR_readlink, sys_readlink)
+#define __NR_uselib 86
+__SYSCALL(__NR_uselib, sys_uselib)
+#define __NR_swapon 87
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_reboot 88
+__SYSCALL(__NR_reboot, sys_reboot)
+			/* 89 was sys_readdir */
+__SYSCALL(89, sys_ni_syscall)
+			/* 90 was sys_mmap */
+__SYSCALL(90, sys_ni_syscall)
+#define __NR_munmap 91
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_truncate 92
+__SYSCALL(__NR_truncate, compat_sys_truncate)
+#define __NR_ftruncate 93
+__SYSCALL(__NR_ftruncate, compat_sys_ftruncate)
+#define __NR_fchmod 94
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchown 95
+__SYSCALL(__NR_fchown, sys_fchown16)
+#define __NR_getpriority 96
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_setpriority 97
+__SYSCALL(__NR_setpriority, sys_setpriority)
+			/* 98 was sys_profil */
+__SYSCALL(98, sys_ni_syscall)
+#define __NR_statfs 99
+__SYSCALL(__NR_statfs, compat_sys_statfs)
+#define __NR_fstatfs 100
+__SYSCALL(__NR_fstatfs, compat_sys_fstatfs)
+			/* 101 was sys_ioperm */
+__SYSCALL(101, sys_ni_syscall)
+			/* 102 was sys_socketcall */
+__SYSCALL(102, sys_ni_syscall)
+#define __NR_syslog 103
+__SYSCALL(__NR_syslog, sys_syslog)
+#define __NR_setitimer 104
+__SYSCALL(__NR_setitimer, compat_sys_setitimer)
+#define __NR_getitimer 105
+__SYSCALL(__NR_getitimer, compat_sys_getitimer)
+#define __NR_stat 106
+__SYSCALL(__NR_stat, compat_sys_newstat)
+#define __NR_lstat 107
+__SYSCALL(__NR_lstat, compat_sys_newlstat)
+#define __NR_fstat 108
+__SYSCALL(__NR_fstat, compat_sys_newfstat)
+			/* 109 was sys_uname */
+__SYSCALL(109, sys_ni_syscall)
+			/* 110 was sys_iopl */
+__SYSCALL(110, sys_ni_syscall)
+#define __NR_vhangup 111
+__SYSCALL(__NR_vhangup, sys_vhangup)
+			/* 112 was sys_idle */
+__SYSCALL(112, sys_ni_syscall)
+			/* 113 was sys_syscall */
+__SYSCALL(113, sys_ni_syscall)
+#define __NR_wait4 114
+__SYSCALL(__NR_wait4, compat_sys_wait4)
+#define __NR_swapoff 115
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_sysinfo 116
+__SYSCALL(__NR_sysinfo, compat_sys_sysinfo)
+			/* 117 was sys_ipc */
+__SYSCALL(117, sys_ni_syscall)
+#define __NR_fsync 118
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_sigreturn 119
+__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
+#define __NR_clone 120
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_setdomainname 121
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_uname 122
+__SYSCALL(__NR_uname, sys_newuname)
+			/* 123 was sys_modify_ldt */
+__SYSCALL(123, sys_ni_syscall)
+#define __NR_adjtimex 124
+__SYSCALL(__NR_adjtimex, compat_sys_adjtimex)
+#define __NR_mprotect 125
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_sigprocmask 126
+__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask)
+			/* 127 was sys_create_module */
+__SYSCALL(127, sys_ni_syscall)
+#define __NR_init_module 128
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 129
+__SYSCALL(__NR_delete_module, sys_delete_module)
+			/* 130 was sys_get_kernel_syms */
+__SYSCALL(130, sys_ni_syscall)
+#define __NR_quotactl 131
+__SYSCALL(__NR_quotactl, sys_quotactl)
+#define __NR_getpgid 132
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_fchdir 133
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_bdflush 134
+__SYSCALL(__NR_bdflush, sys_bdflush)
+#define __NR_sysfs 135
+__SYSCALL(__NR_sysfs, sys_sysfs)
+#define __NR_personality 136
+__SYSCALL(__NR_personality, sys_personality)
+			/* 137 was sys_afs_syscall */
+__SYSCALL(137, sys_ni_syscall)
+#define __NR_setfsuid 138
+__SYSCALL(__NR_setfsuid, sys_setfsuid16)
+#define __NR_setfsgid 139
+__SYSCALL(__NR_setfsgid, sys_setfsgid16)
+#define __NR__llseek 140
+__SYSCALL(__NR__llseek, sys_llseek)
+#define __NR_getdents 141
+__SYSCALL(__NR_getdents, compat_sys_getdents)
+#define __NR__newselect 142
+__SYSCALL(__NR__newselect, compat_sys_select)
+#define __NR_flock 143
+__SYSCALL(__NR_flock, sys_flock)
+#define __NR_msync 144
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_readv 145
+__SYSCALL(__NR_readv, compat_sys_readv)
+#define __NR_writev 146
+__SYSCALL(__NR_writev, compat_sys_writev)
+#define __NR_getsid 147
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_fdatasync 148
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#define __NR__sysctl 149
+__SYSCALL(__NR__sysctl, compat_sys_sysctl)
+#define __NR_mlock 150
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 151
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 152
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 153
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_sched_setparam 154
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_getparam 155
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setscheduler 156
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 157
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_yield 158
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 159
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 160
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 161
+__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval)
+#define __NR_nanosleep 162
+__SYSCALL(__NR_nanosleep, compat_sys_nanosleep)
+#define __NR_mremap 163
+__SYSCALL(__NR_mremap, sys_mremap)
+#define __NR_setresuid 164
+__SYSCALL(__NR_setresuid, sys_setresuid16)
+#define __NR_getresuid 165
+__SYSCALL(__NR_getresuid, sys_getresuid16)
+			/* 166 was sys_vm86 */
+__SYSCALL(166, sys_ni_syscall)
+			/* 167 was sys_query_module */
+__SYSCALL(167, sys_ni_syscall)
+#define __NR_poll 168
+__SYSCALL(__NR_poll, sys_poll)
+#define __NR_nfsservctl 169
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+#define __NR_setresgid 170
+__SYSCALL(__NR_setresgid, sys_setresgid16)
+#define __NR_getresgid 171
+__SYSCALL(__NR_getresgid, sys_getresgid16)
+#define __NR_prctl 172
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_rt_sigreturn 173
+__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
+#define __NR_rt_sigaction 174
+__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 175
+__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 176
+__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 177
+__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 178
+__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigsuspend 179
+__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_pread64 180
+__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+#define __NR_pwrite64 181
+__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+#define __NR_chown 182
+__SYSCALL(__NR_chown, sys_chown16)
+#define __NR_getcwd 183
+__SYSCALL(__NR_getcwd, sys_getcwd)
+#define __NR_capget 184
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 185
+__SYSCALL(__NR_capset, sys_capset)
+#define __NR_sigaltstack 186
+__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack)
+#define __NR_sendfile 187
+__SYSCALL(__NR_sendfile, compat_sys_sendfile)
+			/* 188 reserved */
+__SYSCALL(188, sys_ni_syscall)
+			/* 189 reserved */
+__SYSCALL(189, sys_ni_syscall)
+#define __NR_vfork 190
+__SYSCALL(__NR_vfork, sys_vfork)
+#define __NR_ugetrlimit 191	/* SuS compliant getrlimit */
+__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit)		/* SuS compliant getrlimit */
+#define __NR_mmap2 192
+__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+#define __NR_truncate64 193
+__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+#define __NR_ftruncate64 194
+__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+#define __NR_stat64 195
+__SYSCALL(__NR_stat64, sys_stat64)
+#define __NR_lstat64 196
+__SYSCALL(__NR_lstat64, sys_lstat64)
+#define __NR_fstat64 197
+__SYSCALL(__NR_fstat64, sys_fstat64)
+#define __NR_lchown32 198
+__SYSCALL(__NR_lchown32, sys_lchown)
+#define __NR_getuid32 199
+__SYSCALL(__NR_getuid32, sys_getuid)
+#define __NR_getgid32 200
+__SYSCALL(__NR_getgid32, sys_getgid)
+#define __NR_geteuid32 201
+__SYSCALL(__NR_geteuid32, sys_geteuid)
+#define __NR_getegid32 202
+__SYSCALL(__NR_getegid32, sys_getegid)
+#define __NR_setreuid32 203
+__SYSCALL(__NR_setreuid32, sys_setreuid)
+#define __NR_setregid32 204
+__SYSCALL(__NR_setregid32, sys_setregid)
+#define __NR_getgroups32 205
+__SYSCALL(__NR_getgroups32, sys_getgroups)
+#define __NR_setgroups32 206
+__SYSCALL(__NR_setgroups32, sys_setgroups)
+#define __NR_fchown32 207
+__SYSCALL(__NR_fchown32, sys_fchown)
+#define __NR_setresuid32 208
+__SYSCALL(__NR_setresuid32, sys_setresuid)
+#define __NR_getresuid32 209
+__SYSCALL(__NR_getresuid32, sys_getresuid)
+#define __NR_setresgid32 210
+__SYSCALL(__NR_setresgid32, sys_setresgid)
+#define __NR_getresgid32 211
+__SYSCALL(__NR_getresgid32, sys_getresgid)
+#define __NR_chown32 212
+__SYSCALL(__NR_chown32, sys_chown)
+#define __NR_setuid32 213
+__SYSCALL(__NR_setuid32, sys_setuid)
+#define __NR_setgid32 214
+__SYSCALL(__NR_setgid32, sys_setgid)
+#define __NR_setfsuid32 215
+__SYSCALL(__NR_setfsuid32, sys_setfsuid)
+#define __NR_setfsgid32 216
+__SYSCALL(__NR_setfsgid32, sys_setfsgid)
+#define __NR_getdents64 217
+__SYSCALL(__NR_getdents64, compat_sys_getdents64)
+#define __NR_pivot_root 218
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+#define __NR_mincore 219
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 220
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_fcntl64 221
+__SYSCALL(__NR_fcntl64, compat_sys_fcntl64)
+			/* 222 for tux */
+__SYSCALL(222, sys_ni_syscall)
+			/* 223 is unused */
+__SYSCALL(223, sys_ni_syscall)
+#define __NR_gettid 224
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_readahead 225
+__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+#define __NR_setxattr 226
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 227
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 228
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 229
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 230
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 231
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 232
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 233
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 234
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 235
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 236
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 237
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+#define __NR_tkill 238
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_sendfile64 239
+__SYSCALL(__NR_sendfile64, sys_sendfile64)
+#define __NR_futex 240
+__SYSCALL(__NR_futex, compat_sys_futex)
+#define __NR_sched_setaffinity 241
+__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 242
+__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity)
+#define __NR_io_setup 243
+__SYSCALL(__NR_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 244
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_getevents 245
+__SYSCALL(__NR_io_getevents, compat_sys_io_getevents)
+#define __NR_io_submit 246
+__SYSCALL(__NR_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 247
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_exit_group 248
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_lookup_dcookie 249
+__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie)
+#define __NR_epoll_create 250
+__SYSCALL(__NR_epoll_create, sys_epoll_create)
+#define __NR_epoll_ctl 251
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_wait 252
+__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
+#define __NR_remap_file_pages 253
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+			/* 254 for set_thread_area */
+__SYSCALL(254, sys_ni_syscall)
+			/* 255 for get_thread_area */
+__SYSCALL(255, sys_ni_syscall)
+#define __NR_set_tid_address 256
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_timer_create 257
+__SYSCALL(__NR_timer_create, compat_sys_timer_create)
+#define __NR_timer_settime 258
+__SYSCALL(__NR_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_gettime 259
+__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 260
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_delete 261
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 262
+__SYSCALL(__NR_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 263
+__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 264
+__SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 265
+__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
+#define __NR_statfs64 266
+__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+#define __NR_fstatfs64 267
+__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+#define __NR_tgkill 268
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_utimes 269
+__SYSCALL(__NR_utimes, compat_sys_utimes)
+#define __NR_arm_fadvise64_64 270
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+#define __NR_pciconfig_iobase 271
+__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
+#define __NR_pciconfig_read 272
+__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
+#define __NR_pciconfig_write 273
+__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
+#define __NR_mq_open 274
+__SYSCALL(__NR_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 275
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 276
+__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 277
+__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive)
+#define __NR_mq_notify 278
+__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 279
+__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr)
+#define __NR_waitid 280
+__SYSCALL(__NR_waitid, compat_sys_waitid)
+#define __NR_socket 281
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_bind 282
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_connect 283
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_listen 284
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 285
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_getsockname 286
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 287
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_socketpair 288
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_send 289
+__SYSCALL(__NR_send, sys_send)
+#define __NR_sendto 290
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recv 291
+__SYSCALL(__NR_recv, compat_sys_recv)
+#define __NR_recvfrom 292
+__SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
+#define __NR_shutdown 293
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_setsockopt 294
+__SYSCALL(__NR_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 295
+__SYSCALL(__NR_getsockopt, compat_sys_getsockopt)
+#define __NR_sendmsg 296
+__SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 297
+__SYSCALL(__NR_recvmsg, compat_sys_recvmsg)
+#define __NR_semop 298
+__SYSCALL(__NR_semop, sys_semop)
+#define __NR_semget 299
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 300
+__SYSCALL(__NR_semctl, compat_sys_semctl)
+#define __NR_msgsnd 301
+__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
+#define __NR_msgrcv 302
+__SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
+#define __NR_msgget 303
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 304
+__SYSCALL(__NR_msgctl, compat_sys_msgctl)
+#define __NR_shmat 305
+__SYSCALL(__NR_shmat, compat_sys_shmat)
+#define __NR_shmdt 306
+__SYSCALL(__NR_shmdt, sys_shmdt)
+#define __NR_shmget 307
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 308
+__SYSCALL(__NR_shmctl, compat_sys_shmctl)
+#define __NR_add_key 309
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 310
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 311
+__SYSCALL(__NR_keyctl, compat_sys_keyctl)
+#define __NR_semtimedop 312
+__SYSCALL(__NR_semtimedop, compat_sys_semtimedop)
+#define __NR_vserver 313
+__SYSCALL(__NR_vserver, sys_ni_syscall)
+#define __NR_ioprio_set 314
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 315
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+#define __NR_inotify_init 316
+__SYSCALL(__NR_inotify_init, sys_inotify_init)
+#define __NR_inotify_add_watch 317
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 318
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_mbind 319
+__SYSCALL(__NR_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 320
+__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 321
+__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_openat 322
+__SYSCALL(__NR_openat, compat_sys_openat)
+#define __NR_mkdirat 323
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_mknodat 324
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_fchownat 325
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_futimesat 326
+__SYSCALL(__NR_futimesat, compat_sys_futimesat)
+#define __NR_fstatat64 327
+__SYSCALL(__NR_fstatat64, sys_fstatat64)
+#define __NR_unlinkat 328
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_renameat 329
+__SYSCALL(__NR_renameat, sys_renameat)
+#define __NR_linkat 330
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_symlinkat 331
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_readlinkat 332
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR_fchmodat 333
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_faccessat 334
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_pselect6 335
+__SYSCALL(__NR_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 336
+__SYSCALL(__NR_ppoll, compat_sys_ppoll)
+#define __NR_unshare 337
+__SYSCALL(__NR_unshare, sys_unshare)
+#define __NR_set_robust_list 338
+__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list)
+#define __NR_get_robust_list 339
+__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
+#define __NR_splice 340
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_sync_file_range2 341
+__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+#define __NR_tee 342
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_vmsplice 343
+__SYSCALL(__NR_vmsplice, compat_sys_vmsplice)
+#define __NR_move_pages 344
+__SYSCALL(__NR_move_pages, compat_sys_move_pages)
+#define __NR_getcpu 345
+__SYSCALL(__NR_getcpu, sys_getcpu)
+#define __NR_epoll_pwait 346
+__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
+#define __NR_kexec_load 347
+__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
+#define __NR_utimensat 348
+__SYSCALL(__NR_utimensat, compat_sys_utimensat)
+#define __NR_signalfd 349
+__SYSCALL(__NR_signalfd, compat_sys_signalfd)
+#define __NR_timerfd_create 350
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_eventfd 351
+__SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_fallocate 352
+__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+#define __NR_timerfd_settime 353
+__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 354
+__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime)
+#define __NR_signalfd4 355
+__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
+#define __NR_eventfd2 356
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+#define __NR_epoll_create1 357
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_dup3 358
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR_pipe2 359
+__SYSCALL(__NR_pipe2, sys_pipe2)
+#define __NR_inotify_init1 360
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_preadv 361
+__SYSCALL(__NR_preadv, compat_sys_preadv)
+#define __NR_pwritev 362
+__SYSCALL(__NR_pwritev, compat_sys_pwritev)
+#define __NR_rt_tgsigqueueinfo 363
+__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 364
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_recvmmsg 365
+__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg)
+#define __NR_accept4 366
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_fanotify_init 367
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 368
+__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark)
+#define __NR_prlimit64 369
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_name_to_handle_at 370
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 371
+__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 372
+__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 373
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_sendmmsg 374
+__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
+#define __NR_setns 375
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_process_vm_readv 376
+__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 377
+__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev)
+#define __NR_kcmp 378
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 379
+__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 380
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 381
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 382
+__SYSCALL(__NR_renameat2, sys_renameat2)
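
Reviewer note: each __NR_* definition above is paired with a handler through __SYSCALL(); the
actual dispatch table is typically materialised by redefining __SYSCALL and re-including the
header. A minimal standalone sketch of that idiom follows — syscall_fn_t, NR_MAX, and the stub
handlers are illustrative only, not the kernel's definitions:

	#include <stdio.h>

	typedef long (*syscall_fn_t)(void);

	static long sys_ni_syscall(void) { return -38; /* -ENOSYS */ }
	static long sys_getpid_stub(void) { return 42; }

	#define NR_MAX 4
	#define __NR_getpid 2
	/* Expand each __SYSCALL(nr, handler) into a designated initializer. */
	#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)sym,

	static const syscall_fn_t call_table[NR_MAX] = {
		[0 ... NR_MAX - 1] = sys_ni_syscall,	/* default: not implemented */
		__SYSCALL(__NR_getpid, sys_getpid_stub)	/* stands in for the header */
	};

	int main(void)
	{
		printf("nr 2 -> %ld, nr 3 -> %ld\n", call_table[2](), call_table[3]());
		return 0;
	}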
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index cdaedad..27c72ef 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -15,7 +15,8 @@
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
+			   cpuinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index d62d12f..cce9524 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -30,8 +30,8 @@
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_SMP
 	&smp_spin_table_ops,
-	&cpu_psci_ops,
 #endif
+	&cpu_psci_ops,
 	NULL,
 };
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
new file mode 100644
index 0000000..f798f66
--- /dev/null
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -0,0 +1,192 @@
+/*
+ * Record and handle CPU attributes.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <asm/arch_timer.h>
+#include <asm/cachetype.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+/*
+ * In case the boot CPU is hotpluggable, we record its initial state and
+ * current state separately. Certain system registers may contain different
+ * values depending on configuration at or after reset.
+ */
+DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+static struct cpuinfo_arm64 boot_cpu_data;
+
+static char *icache_policy_str[] = {
+	[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
+	[ICACHE_POLICY_AIVIVT] = "AIVIVT",
+	[ICACHE_POLICY_VIPT] = "VIPT",
+	[ICACHE_POLICY_PIPT] = "PIPT",
+};
+
+unsigned long __icache_flags;
+
+static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
+{
+	unsigned int cpu = smp_processor_id();
+	u32 l1ip = CTR_L1IP(info->reg_ctr);
+
+	if (l1ip != ICACHE_POLICY_PIPT)
+		set_bit(ICACHEF_ALIASING, &__icache_flags);
+	if (l1ip == ICACHE_POLICY_AIVIVT)
+		set_bit(ICACHEF_AIVIVT, &__icache_flags);
+
+	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+}
+
+static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
+{
+	if ((boot & mask) == (cur & mask))
+		return 0;
+
+	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
+		name, (unsigned long)boot, cpu, (unsigned long)cur);
+
+	return 1;
+}
+
+#define CHECK_MASK(field, mask, boot, cur, cpu) \
+	check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
+
+#define CHECK(field, boot, cur, cpu) \
+	CHECK_MASK(field, ~0ULL, boot, cur, cpu)
+
+/*
+ * Verify that CPUs don't have unexpected differences that will cause problems.
+ */
+static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
+{
+	unsigned int cpu = smp_processor_id();
+	struct cpuinfo_arm64 *boot = &boot_cpu_data;
+	unsigned int diff = 0;
+
+	/*
+	 * The kernel can handle differing I-cache policies, but otherwise
+	 * caches should look identical. Userspace JITs will make use of
+	 * *minLine.
+	 */
+	diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);
+
+	/*
+	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
+	 * could result in too much or too little memory being zeroed if a
+	 * process is preempted and migrated between CPUs.
+	 */
+	diff |= CHECK(dczid, boot, cur, cpu);
+
+	/* If different, timekeeping will be broken (especially with KVM) */
+	diff |= CHECK(cntfrq, boot, cur, cpu);
+
+	/*
+	 * Even in big.LITTLE, processors should be identical instruction-set
+	 * wise.
+	 */
+	diff |= CHECK(id_aa64isar0, boot, cur, cpu);
+	diff |= CHECK(id_aa64isar1, boot, cur, cpu);
+
+	/*
+	 * Differing PARange support is fine as long as all peripherals and
+	 * memory are mapped within the minimum PARange of all CPUs.
+	 * Linux should not care about secure memory.
+	 * ID_AA64MMFR1 is currently RES0.
+	 */
+	diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
+	diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);
+
+	/*
+	 * EL3 is not our concern.
+	 * ID_AA64PFR1 is currently RES0.
+	 */
+	diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
+	diff |= CHECK(id_aa64pfr1, boot, cur, cpu);
+
+	/*
+	 * If we have AArch32, we care about 32-bit features for compat. These
+	 * registers should be RES0 otherwise.
+	 */
+	diff |= CHECK(id_isar0, boot, cur, cpu);
+	diff |= CHECK(id_isar1, boot, cur, cpu);
+	diff |= CHECK(id_isar2, boot, cur, cpu);
+	diff |= CHECK(id_isar3, boot, cur, cpu);
+	diff |= CHECK(id_isar4, boot, cur, cpu);
+	diff |= CHECK(id_isar5, boot, cur, cpu);
+	diff |= CHECK(id_mmfr0, boot, cur, cpu);
+	diff |= CHECK(id_mmfr1, boot, cur, cpu);
+	diff |= CHECK(id_mmfr2, boot, cur, cpu);
+	diff |= CHECK(id_mmfr3, boot, cur, cpu);
+	diff |= CHECK(id_pfr0, boot, cur, cpu);
+	diff |= CHECK(id_pfr1, boot, cur, cpu);
+
+	/*
+	 * Mismatched CPU features are a recipe for disaster. Don't even
+	 * pretend to support them.
+	 */
+	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
+			"Unsupported CPU feature variation.");
+}
+
+static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+{
+	info->reg_cntfrq = arch_timer_get_cntfrq();
+	info->reg_ctr = read_cpuid_cachetype();
+	info->reg_dczid = read_cpuid(DCZID_EL0);
+	info->reg_midr = read_cpuid_id();
+
+	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+
+	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+	cpuinfo_detect_icache_policy(info);
+}
+
+void cpuinfo_store_cpu(void)
+{
+	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+	__cpuinfo_store_cpu(info);
+	cpuinfo_sanity_check(info);
+}
+
+void __init cpuinfo_store_boot_cpu(void)
+{
+	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
+	__cpuinfo_store_cpu(info);
+
+	boot_cpu_data = *info;
+}
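
Reviewer note: the CHECK()/CHECK_MASK() macros above reduce to a masked comparison of a boot-CPU
register snapshot against the current CPU's. A standalone illustration of the same comparison
(the register values below are invented; the 0xffff3fff CTR mask deliberately ignores the
L1Ip policy bits [15:14], matching the sanity check above):

	#include <stdint.h>
	#include <stdio.h>

	/* Same logic as check_reg_mask(): only bits under the mask must match. */
	static int check_reg_mask(const char *name, uint64_t mask,
				  uint64_t boot, uint64_t cur, int cpu)
	{
		if ((boot & mask) == (cur & mask))
			return 0;
		printf("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
		       name, (unsigned long long)boot, cpu, (unsigned long long)cur);
		return 1;
	}

	int main(void)
	{
		uint64_t boot_ctr = 0x0000000084448004ULL;	/* invented value */
		uint64_t cpu1_ctr = 0x000000008444c004ULL;	/* differs only in L1Ip */

		/* Returns 0: the differing bits are masked out. */
		return check_reg_mask("ctr", 0xffff3fff, boot_ctr, cpu1_ctr, 1);
	}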
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index a7fb874..fe5b940 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -315,20 +315,20 @@
 {
 	siginfo_t info;
 
-	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
-		return 0;
+	if (user_mode(regs)) {
+		info = (siginfo_t) {
+			.si_signo = SIGTRAP,
+			.si_errno = 0,
+			.si_code  = TRAP_BRKPT,
+			.si_addr  = (void __user *)instruction_pointer(regs),
+		};
 
-	if (!user_mode(regs))
+		force_sig_info(SIGTRAP, &info, current);
+	} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+		pr_warning("Unexpected kernel BRK exception at EL1\n");
 		return -EFAULT;
+	}
 
-	info = (siginfo_t) {
-		.si_signo = SIGTRAP,
-		.si_errno = 0,
-		.si_code  = TRAP_BRKPT,
-		.si_addr  = (void __user *)instruction_pointer(regs),
-	};
-
-	force_sig_info(SIGTRAP, &info, current);
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index d358cca..c44a82f 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -52,7 +52,7 @@
 ENTRY(fpsimd_save_partial_state)
 	fpsimd_save_partial x0, 1, 8, 9
 	ret
-ENDPROC(fpsimd_load_partial_state)
+ENDPROC(fpsimd_save_partial_state)
 
 /*
  * Load the bottom n FP registers.
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index aa5f9fc..38e704e 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -96,11 +96,6 @@
  *     - ftrace_graph_caller to set up an exit hook
  */
 ENTRY(_mcount)
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	ldr	x0, =ftrace_trace_stop
-	ldr	x0, [x0]		// if ftrace_trace_stop
-	ret				//   return;
-#endif
 	mcount_enter
 
 	ldr	x0, =ftrace_trace_function
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9ce04ba..f0b5e51 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -27,7 +27,32 @@
 #include <asm/esr.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-#include <asm/unistd32.h>
+
+/*
+ * Context tracking subsystem.  Used to instrument transitions
+ * between user and kernel mode.
+ */
+	.macro ct_user_exit, syscall = 0
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_exit
+	.if \syscall == 1
+	/*
+	 * Save/restore needed during syscalls.  Restore syscall arguments from
+	 * the values already saved on stack during kernel_entry.
+	 */
+	ldp	x0, x1, [sp]
+	ldp	x2, x3, [sp, #S_X2]
+	ldp	x4, x5, [sp, #S_X4]
+	ldp	x6, x7, [sp, #S_X6]
+	.endif
+#endif
+	.endm
+
+	.macro ct_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_enter
+#endif
+	.endm
 
 /*
  * Bad Abort numbers
@@ -91,6 +116,7 @@
 	.macro	kernel_exit, el, ret = 0
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
+	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	.endif
 	.if	\ret
@@ -353,7 +379,6 @@
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
 	b.eq	el0_svc
-	adr	lr, ret_to_user
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
@@ -382,7 +407,6 @@
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
 	b.eq	el0_svc_compat
-	adr	lr, ret_to_user
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
@@ -425,48 +449,59 @@
 	/*
 	 * Data abort handling
 	 */
-	mrs	x0, far_el1
-	bic	x0, x0, #(0xff << 56)
+	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
+	bic	x0, x26, #(0xff << 56)
 	mov	x1, x25
 	mov	x2, sp
+	adr	lr, ret_to_user
 	b	do_mem_abort
 el0_ia:
 	/*
 	 * Instruction abort handling
 	 */
-	mrs	x0, far_el1
+	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
+	mov	x0, x26
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
+	adr	lr, ret_to_user
 	b	do_mem_abort
 el0_fpsimd_acc:
 	/*
 	 * Floating Point or Advanced SIMD access
 	 */
 	enable_dbg
+	ct_user_exit
 	mov	x0, x25
 	mov	x1, sp
+	adr	lr, ret_to_user
 	b	do_fpsimd_acc
 el0_fpsimd_exc:
 	/*
 	 * Floating Point or Advanced SIMD exception
 	 */
 	enable_dbg
+	ct_user_exit
 	mov	x0, x25
 	mov	x1, sp
+	adr	lr, ret_to_user
 	b	do_fpsimd_exc
 el0_sp_pc:
 	/*
 	 * Stack or PC alignment exception handling
 	 */
-	mrs	x0, far_el1
+	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	mov	x0, x26
 	mov	x1, x25
 	mov	x2, sp
+	adr	lr, ret_to_user
 	b	do_sp_pc_abort
 el0_undef:
 	/*
@@ -474,7 +509,9 @@
 	 */
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
 	mov	x0, sp
+	adr	lr, ret_to_user
 	b	do_undefinstr
 el0_dbg:
 	/*
@@ -486,12 +523,15 @@
 	mov	x2, sp
 	bl	do_debug_exception
 	enable_dbg
+	ct_user_exit
 	b	ret_to_user
 el0_inv:
 	enable_dbg
+	ct_user_exit
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
 	mrs	x2, esr_el1
+	adr	lr, ret_to_user
 	b	bad_mode
 ENDPROC(el0_sync)
 
@@ -504,6 +544,7 @@
 	bl	trace_hardirqs_off
 #endif
 
+	ct_user_exit
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -608,6 +649,7 @@
 el0_svc_naked:					// compat entry point
 	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
 	enable_dbg_and_irq
+	ct_user_exit 1
 
 	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
 	tst	x16, #_TIF_SYSCALL_WORK
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a2c1195..144f105 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
@@ -35,37 +36,31 @@
 #include <asm/page.h>
 #include <asm/virt.h>
 
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
 #define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
 
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xf) != 0
+#error TEXT_OFFSET must be at least 16B aligned
+#elif (PAGE_OFFSET & 0xfffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0xfffff
+#error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
-	.globl	swapper_pg_dir
-	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
-	.globl	idmap_pg_dir
-	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
-	.macro	pgtbl, ttb0, ttb1, phys
-	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
-	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
+	.macro	pgtbl, ttb0, ttb1, virt_to_phys
+	ldr	\ttb1, =swapper_pg_dir
+	ldr	\ttb0, =idmap_pg_dir
+	add	\ttb1, \ttb1, \virt_to_phys
+	add	\ttb0, \ttb0, \virt_to_phys
 	.endm
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define BLOCK_SHIFT	PAGE_SHIFT
 #define BLOCK_SIZE	PAGE_SIZE
+#define TABLE_SHIFT	PMD_SHIFT
 #else
 #define BLOCK_SHIFT	SECTION_SHIFT
 #define BLOCK_SIZE	SECTION_SIZE
+#define TABLE_SHIFT	PUD_SHIFT
 #endif
 
 #define KERNEL_START	KERNEL_RAM_VADDR
@@ -120,9 +115,9 @@
 	b	stext				// branch to kernel start, magic
 	.long	0				// reserved
 #endif
-	.quad	TEXT_OFFSET			// Image load offset from start of RAM
-	.quad	0				// reserved
-	.quad	0				// reserved
+	.quad	_kernel_offset_le		// Image load offset from start of RAM, little-endian
+	.quad	_kernel_size_le			// Effective size of kernel image, little-endian
+	.quad	_kernel_flags_le		// Informative flags, little-endian
 	.quad	0				// reserved
 	.quad	0				// reserved
 	.quad	0				// reserved
@@ -295,6 +290,23 @@
 	msr	cnthctl_el2, x0
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 
+#ifdef CONFIG_ARM_GIC_V3
+	/* GICv3 system register access */
+	mrs	x0, id_aa64pfr0_el1
+	ubfx	x0, x0, #24, #4
+	cmp	x0, #1
+	b.ne	3f
+
+	mrs_s	x0, ICC_SRE_EL2
+	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
+	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
+	msr_s	ICC_SRE_EL2, x0
+	isb					// Make sure SRE is now set
+	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
 	/* Populate ID registers. */
 	mrs	x0, midr_el1
 	mrs	x1, mpidr_el1
@@ -413,7 +425,7 @@
 	mov	x23, x0				// x23=current cpu_table
 	cbz	x23, __error_p			// invalid processor (x23=0)?
 
-	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
+	pgtbl	x25, x26, x28			// x25=TTBR0, x26=TTBR1
 	ldr	x12, [x23, #CPU_INFO_SETUP]
 	add	x12, x12, x28			// __virt_to_phys
 	blr	x12				// initialise processor
@@ -455,8 +467,13 @@
  *  x27 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block we might require an additional
+ * table to map the entire function.
  */
-	.align	6
+	.align	4
 __turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
@@ -479,17 +496,38 @@
 	.quad	PAGE_OFFSET
 
 /*
- * Macro to populate the PGD for the corresponding block entry in the next
- * level (tbl) for the given virtual address.
+ * Macro to create a table entry to the next page.
  *
- * Preserves:	pgd, tbl, virt
+ *	tbl:	page table address
+ *	virt:	virtual address
+ *	shift:	#imm page table shift
+ *	ptrs:	#imm pointers per table page
+ *
+ * Preserves:	virt
+ * Corrupts:	tmp1, tmp2
+ * Returns:	tbl -> next level table page address
+ */
+	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+	lsr	\tmp1, \virt, #\shift
+	and	\tmp1, \tmp1, #\ptrs - 1	// table index
+	add	\tmp2, \tbl, #PAGE_SIZE
+	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
+	str	\tmp2, [\tbl, \tmp1, lsl #3]
+	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
+	.endm
+
+/*
+ * Macro to populate the PGD (and possibly PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
+ *
+ * Preserves:	tbl, next, virt
  * Corrupts:	tmp1, tmp2
  */
-	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
-	lsr	\tmp1, \virt, #PGDIR_SHIFT
-	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
-	orr	\tmp2, \tbl, #3			// PGD entry table type
-	str	\tmp2, [\pgd, \tmp1, lsl #3]
+	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
+	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS == 3
+	create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#endif
 	.endm
 
 /*
@@ -522,7 +560,7 @@
  *   - pgd entry for fixed mappings (TTBR1)
  */
 __create_page_tables:
-	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	pgtbl	x25, x26, x28			// idmap_pg_dir and swapper_pg_dir addresses
 	mov	x27, lr
 
 	/*
@@ -550,10 +588,10 @@
 	/*
 	 * Create the identity mapping.
 	 */
-	add	x0, x25, #PAGE_SIZE		// section table address
+	mov	x0, x25				// idmap_pg_dir
 	ldr	x3, =KERNEL_START
 	add	x3, x3, x28			// __pa(KERNEL_START)
-	create_pgd_entry x25, x0, x3, x5, x6
+	create_pgd_entry x0, x3, x5, x6
 	ldr	x6, =KERNEL_END
 	mov	x5, x3				// __pa(KERNEL_START)
 	add	x6, x6, x28			// __pa(KERNEL_END)
@@ -562,9 +600,9 @@
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
-	add	x0, x26, #PAGE_SIZE		// section table address
+	mov	x0, x26				// swapper_pg_dir
 	mov	x5, #PAGE_OFFSET
-	create_pgd_entry x26, x0, x5, x3, x6
+	create_pgd_entry x0, x5, x3, x6
 	ldr	x6, =KERNEL_END
 	mov	x3, x24				// phys offset
 	create_block_map x0, x7, x3, x5, x6
@@ -586,13 +624,6 @@
 	create_block_map x0, x7, x3, x5, x6
 1:
 	/*
-	 * Create the pgd entry for the fixed mappings.
-	 */
-	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
-	add	x0, x26, #2 * PAGE_SIZE		// section table address
-	create_pgd_entry x26, x0, x5, x6, x7
-
-	/*
 	 * Since the page tables have been populated with non-cacheable
 	 * accesses (MMU disabled), invalidate the idmap and swapper page
 	 * tables again to remove any speculatively loaded cache lines.
@@ -611,7 +642,7 @@
 __switch_data:
 	.quad	__mmap_switched
 	.quad	__bss_start			// x6
-	.quad	_end				// x7
+	.quad	__bss_stop			// x7
 	.quad	processor_id			// x4
 	.quad	__fdt_pointer			// x5
 	.quad	memstart_addr			// x6
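
Reviewer note: the create_table_entry macro in the head.S hunk above computes a table slot as
(virt >> shift) & (ptrs - 1) and links the next page-table page into it. The index arithmetic
rendered as standalone C (a sketch; the shift/ptrs values assume 4K pages with PGDIR_SHIFT = 30
and PTRS_PER_PGD = 512, and the address is invented):

	#include <stdint.h>
	#include <stdio.h>

	/* Slot selection from create_table_entry: (virt >> shift) & (ptrs - 1). */
	static unsigned long table_index(uint64_t virt, unsigned int shift,
					 unsigned int ptrs)
	{
		return (virt >> shift) & (ptrs - 1);
	}

	int main(void)
	{
		uint64_t virt = 0xffffffc000080000ULL;	/* invented kernel VA */

		printf("pgd index = %lu\n", table_index(virt, 30, 512));
		return 0;
	}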
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 0959611..a272f33 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -19,6 +19,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
new file mode 100644
index 0000000..8fae075
--- /dev/null
+++ b/arch/arm64/kernel/image.h
@@ -0,0 +1,62 @@
+/*
+ * Linker script macros to generate Image header fields.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_IMAGE_H
+#define __ASM_IMAGE_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+/*
+ * There aren't any ELF relocations we can use to endian-swap values known only
+ * at link time (e.g. the subtraction of two symbol addresses), so we must get
+ * the linker to endian-swap certain values before emitting them.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define DATA_LE64(data)					\
+	((((data) & 0x00000000000000ff) << 56) |	\
+	 (((data) & 0x000000000000ff00) << 40) |	\
+	 (((data) & 0x0000000000ff0000) << 24) |	\
+	 (((data) & 0x00000000ff000000) << 8)  |	\
+	 (((data) & 0x000000ff00000000) >> 8)  |	\
+	 (((data) & 0x0000ff0000000000) >> 24) |	\
+	 (((data) & 0x00ff000000000000) >> 40) |	\
+	 (((data) & 0xff00000000000000) >> 56))
+#else
+#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
+#endif
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __HEAD_FLAG_BE	1
+#else
+#define __HEAD_FLAG_BE	0
+#endif
+
+#define __HEAD_FLAGS	(__HEAD_FLAG_BE << 0)
+
+/*
+ * These will output as part of the Image header, which should be little-endian
+ * regardless of the endianness of the kernel. While constant values could be
+ * endian swapped in head.S, all are done here for consistency.
+ */
+#define HEAD_SYMBOLS						\
+	_kernel_size_le		= DATA_LE64(_end - _text);	\
+	_kernel_offset_le	= DATA_LE64(TEXT_OFFSET);	\
+	_kernel_flags_le	= DATA_LE64(__HEAD_FLAGS);
+
+#endif /* __ASM_IMAGE_H */
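
Reviewer note: DATA_LE64() above is a link-time byte reversal so the Image header fields stay
little-endian even on big-endian kernels. The same swap can be checked in isolation; the C
below mirrors the macro exactly (a standalone sketch):

	#include <stdint.h>
	#include <stdio.h>

	/* Reverse the eight bytes of a 64-bit value, as DATA_LE64 does. */
	static uint64_t data_le64(uint64_t data)
	{
		return ((data & 0x00000000000000ffULL) << 56) |
		       ((data & 0x000000000000ff00ULL) << 40) |
		       ((data & 0x0000000000ff0000ULL) << 24) |
		       ((data & 0x00000000ff000000ULL) << 8)  |
		       ((data & 0x000000ff00000000ULL) >> 8)  |
		       ((data & 0x0000ff0000000000ULL) >> 24) |
		       ((data & 0x00ff000000000000ULL) >> 40) |
		       ((data & 0xff00000000000000ULL) >> 56);
	}

	int main(void)
	{
		/* Prints 0x0807060504030201. */
		printf("%#018llx\n", (unsigned long long)data_le64(0x0102030405060708ULL));
		return 0;
	}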
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 7787208..997e6b2 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -28,7 +28,7 @@
  * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 	.align	5
 	.globl	__kuser_helper_start
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 43b7c34..1309d64 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -51,6 +51,12 @@
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 static void setup_restart(void)
 {
 	/*
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 9e9798f..5539547 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -235,7 +235,7 @@
  * PSCI Function IDs for v0.2+ are well defined so use
  * standard values.
  */
-static int psci_0_2_init(struct device_node *np)
+static int __init psci_0_2_init(struct device_node *np)
 {
 	int err, ver;
 
@@ -296,7 +296,7 @@
 /*
  * PSCI < v0.2 get PSCI Function IDs via DT.
  */
-static int psci_0_1_init(struct device_node *np)
+static int __init psci_0_1_init(struct device_node *np)
 {
 	u32 id;
 	int err;
@@ -434,9 +434,11 @@
 	return 0;
 }
 #endif
+#endif
 
 const struct cpu_operations cpu_psci_ops = {
 	.name		= "psci",
+#ifdef CONFIG_SMP
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
 	.cpu_boot	= cpu_psci_cpu_boot,
@@ -445,6 +447,6 @@
 	.cpu_die	= cpu_psci_cpu_die,
 	.cpu_kill	= cpu_psci_cpu_kill,
 #endif
+#endif
 };
 
-#endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9fde010..0310811 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -39,6 +40,7 @@
 #include <asm/compat.h>
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
+#include <asm/syscall.h>
 #include <asm/traps.h>
 #include <asm/system_misc.h>
 
@@ -1113,11 +1115,20 @@
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, regs->syscallno);
 
+#ifdef CONFIG_AUDITSYSCALL
+	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
+		regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
+#endif
+
 	return regs->syscallno;
 }
 
 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
+#ifdef CONFIG_AUDITSYSCALL
+	audit_syscall_exit(regs);
+#endif
+
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_exit(regs, regs_return_value(regs));
 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 46d1125..f6f0ccf 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <linux/efi.h>
 
 #include <asm/fixmap.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/cputable.h>
@@ -77,7 +78,6 @@
 #endif
 
 static const char *cpu_name;
-static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -219,6 +219,8 @@
 	sprintf(init_utsname()->machine, ELF_PLATFORM);
 	elf_hwcap = 0;
 
+	cpuinfo_store_boot_cpu();
+
 	/*
 	 * Check for sane CTR_EL0.CWG value.
 	 */
@@ -307,8 +309,6 @@
 		while (true)
 			cpu_relax();
 	}
-
-	machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
@@ -417,14 +417,12 @@
 }
 arch_initcall_sync(arm64_device_init);
 
-static DEFINE_PER_CPU(struct cpu, cpu_data);
-
 static int __init topology_init(void)
 {
 	int i;
 
 	for_each_possible_cpu(i) {
-		struct cpu *cpu = &per_cpu(cpu_data, i);
+		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
 		cpu->hotpluggable = 1;
 		register_cpu(cpu, i);
 	}
@@ -449,10 +447,21 @@
 {
 	int i;
 
-	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
-		   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+	/*
+	 * Dump out the common processor features in a single line. Userspace
+	 * should read the hwcaps with getauxval(AT_HWCAP) rather than
+	 * attempting to parse this.
+	 */
+	seq_puts(m, "features\t:");
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, " %s", hwcap_str[i]);
+	seq_puts(m, "\n\n");
 
 	for_each_online_cpu(i) {
+		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+		u32 midr = cpuinfo->reg_midr;
+
 		/*
 		 * glibc reads /proc/cpuinfo to determine the number of
 		 * online processors, looking for lines beginning with
@@ -461,25 +470,13 @@
 #ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
 #endif
+		seq_printf(m, "implementer\t: 0x%02x\n",
+			   MIDR_IMPLEMENTOR(midr));
+		seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
+		seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
+		seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
 	}
 
-	/* dump out the processor features */
-	seq_puts(m, "Features\t: ");
-
-	for (i = 0; hwcap_str[i]; i++)
-		if (elf_hwcap & (1 << i))
-			seq_printf(m, "%s ", hwcap_str[i]);
-
-	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
-	seq_printf(m, "CPU architecture: AArch64\n");
-	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
-	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
-	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
-
-	seq_puts(m, "\n");
-
-	seq_printf(m, "Hardware\t: %s\n", machine_name);
-
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 3491c63..c5ee208 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -27,7 +27,7 @@
 #include <asm/fpsimd.h>
 #include <asm/signal32.h>
 #include <asm/uaccess.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 struct compat_sigcontext {
 	/* We always set these two fields to 0 */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 40f38f4..3e2f5eb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
@@ -155,6 +156,11 @@
 		cpu_ops[cpu]->cpu_postboot();
 
 	/*
+	 * Log the CPU info before it is marked online and might get read.
+	 */
+	cpuinfo_store_cpu();
+
+	/*
 	 * Enable GIC and timers.
 	 */
 	notify_cpu_starting(cpu);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 1fa9ce4..55a99b9 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -119,7 +119,7 @@
 extern struct sleep_save_sp sleep_save_sp;
 extern phys_addr_t sleep_idmap_phys;
 
-static int cpu_suspend_init(void)
+static int __init cpu_suspend_init(void)
 {
 	void *ctx_ptr;
 
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 26e9c4e..de2b022 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -26,7 +26,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 static inline void
 do_compat_cache_op(unsigned long start, unsigned long end, int flags)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 43514f9..b6ee26b 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/sched.h>
 
+#include <asm/cputype.h>
 #include <asm/topology.h>
 
 static int __init get_cpu_for_node(struct device_node *node)
@@ -188,13 +189,9 @@
 	 * Check that all cores are in the topology; the SMP code will
 	 * only mark cores described in the DT as possible.
 	 */
-	for_each_possible_cpu(cpu) {
-		if (cpu_topology[cpu].cluster_id == -1) {
-			pr_err("CPU%d: No topology information specified\n",
-			       cpu);
+	for_each_possible_cpu(cpu)
+		if (cpu_topology[cpu].cluster_id == -1)
 			ret = -EINVAL;
-		}
-	}
 
 out_map:
 	of_node_put(map);
@@ -219,14 +216,6 @@
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
 	int cpu;
 
-	if (cpuid_topo->cluster_id == -1) {
-		/*
-		 * DT does not contain topology information for this cpu.
-		 */
-		pr_debug("CPU%u: No topology information configured\n", cpuid);
-		return;
-	}
-
 	/* update core and thread sibling masks */
 	for_each_possible_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
@@ -249,6 +238,36 @@
 
 void store_cpu_topology(unsigned int cpuid)
 {
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+	u64 mpidr;
+
+	if (cpuid_topo->cluster_id != -1)
+		goto topology_populated;
+
+	mpidr = read_cpuid_mpidr();
+
+	/* Uniprocessor systems can rely on default topology values */
+	if (mpidr & MPIDR_UP_BITMASK)
+		return;
+
+	/* Create cpu topology mapping based on MPIDR. */
+	if (mpidr & MPIDR_MT_BITMASK) {
+		/* Multiprocessor system : Multi-threads per core */
+		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+	} else {
+		/* Multiprocessor system : Single-thread per core */
+		cpuid_topo->thread_id  = -1;
+		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	}
+
+	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
+		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
+		 cpuid_topo->thread_id, mpidr);
+
+topology_populated:
 	update_siblings_masks(cpuid);
 }
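
Reviewer note: store_cpu_topology() above maps MPIDR affinity fields onto thread/core/cluster
IDs. The same decoding as standalone C — the field positions (Aff0 [7:0], Aff1 [15:8],
Aff2 [23:16], Aff3 [39:32], MT bit 24, U bit 30) are architectural, while the sample register
value is invented:

	#include <stdint.h>
	#include <stdio.h>

	#define MPIDR_UP_BITMASK	(1ULL << 30)	/* U: uniprocessor system */
	#define MPIDR_MT_BITMASK	(1ULL << 24)	/* MT: threads share a core */
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		(((mpidr) >> ((level) == 3 ? 32 : (level) * 8)) & 0xff)

	int main(void)
	{
		uint64_t mpidr = 0x0000000080000101ULL;	/* invented sample value */

		if (mpidr & MPIDR_UP_BITMASK)
			printf("uniprocessor: default topology\n");
		else if (mpidr & MPIDR_MT_BITMASK)
			printf("thread %llu core %llu cluster %llu\n",
			       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, 0),
			       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, 1),
			       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, 2));
		else
			printf("core %llu cluster %llu\n",
			       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, 0),
			       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, 1));
		return 0;
	}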
 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c43cfa9..02cd3f0 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -156,7 +156,7 @@
 		frame.pc = thread_saved_pc(tsk);
 	}
 
-	printk("Call trace:\n");
+	pr_emerg("Call trace:\n");
 	while (1) {
 		unsigned long where = frame.pc;
 		int ret;
@@ -331,17 +331,22 @@
 
 void __pte_error(const char *file, int line, unsigned long val)
 {
-	printk("%s:%d: bad pte %016lx.\n", file, line, val);
+	pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
 }
 
 void __pmd_error(const char *file, int line, unsigned long val)
 {
-	printk("%s:%d: bad pmd %016lx.\n", file, line, val);
+	pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
+}
+
+void __pud_error(const char *file, int line, unsigned long val)
+{
+	pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
 }
 
 void __pgd_error(const char *file, int line, unsigned long val)
 {
-	printk("%s:%d: bad pgd %016lx.\n", file, line, val);
+	pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
 void __init trap_init(void)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 50384fe..24f2e8c 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -88,22 +88,29 @@
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = AARCH32_VECTORS_BASE;
-	int ret;
+	static struct vm_special_mapping spec = {
+		.name	= "[vectors]",
+		.pages	= vectors_page,
+
+	};
+	void *ret;
 
 	down_write(&mm->mmap_sem);
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
-	ret = install_special_mapping(mm, addr, PAGE_SIZE,
-				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-				      vectors_page);
+	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+				       &spec);
 
 	up_write(&mm->mmap_sem);
 
-	return ret;
+	return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
 	int i;
@@ -114,8 +121,8 @@
 	}
 
 	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
 	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -123,12 +130,23 @@
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = virt_to_page(vdso_data);
+
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
-	/* Grab the vDSO data page. */
-	vdso_pagelist[i] = virt_to_page(vdso_data);
+	/* Populate the special mapping structures */
+	vdso_spec[0] = (struct vm_special_mapping) {
+		.name	= "[vvar]",
+		.pages	= vdso_pagelist,
+	};
+
+	vdso_spec[1] = (struct vm_special_mapping) {
+		.name	= "[vdso]",
+		.pages	= &vdso_pagelist[1],
+	};
 
 	return 0;
 }
@@ -138,52 +156,42 @@
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
-	int ret;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	void *ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
-		ret = vdso_base;
+		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
-	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       &vdso_spec[0]);
+	if (IS_ERR(ret))
+		goto up_fail;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_spec[1]);
+	if (IS_ERR(ret))
 		goto up_fail;
-	}
+
+
+	up_write(&mm->mmap_sem);
+	return 0;
 
 up_fail:
+	mm->context.vdso = NULL;
 	up_write(&mm->mmap_sem);
-
-	return ret;
-}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	/*
-	 * We can re-use the vdso pointer in mm_context_t for identifying
-	 * the vectors page for compat applications. The vDSO will always
-	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-	 * it conflicting with the vectors base.
-	 */
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
-#ifdef CONFIG_COMPAT
-		if (vma->vm_start == AARCH32_VECTORS_BASE)
-			return "[vectors]";
-#endif
-		return "[vdso]";
-	}
-
-	return NULL;
+	return PTR_ERR(ret);
 }
 
 /*
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 6d20b7d..ff3bdde 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -43,13 +43,13 @@
 	$(call if_changed,vdsosym)
 
 # Assembly rules for the .S files
-$(obj-vdso): %.o: %.S
+$(obj-vdso): %.o: %.S FORCE
 	$(call if_changed_dep,vdsoas)
 
 # Actual build commands
-quiet_cmd_vdsold = VDSOL $@
+quiet_cmd_vdsold = VDSOL   $@
       cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsoas = VDSOA   $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
 # Install commands for the unstripped file
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 8154b8d..beca249 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -28,6 +28,7 @@
 
 SECTIONS
 {
+	PROVIDE(_vdso_data = . - PAGE_SIZE);
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
 	.hash		: { *(.hash) }			:text
@@ -57,9 +58,6 @@
 	_end = .;
 	PROVIDE(end = .);
 
-	. = ALIGN(PAGE_SIZE);
-	PROVIDE(_vdso_data = .);
-
 	/DISCARD/	: {
 		*(.note.GNU-stack)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f1e6d5c..97f0c04 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -9,6 +9,8 @@
 #include <asm/memory.h>
 #include <asm/page.h>
 
+#include "image.h"
+
 #define ARM_EXIT_KEEP(x)
 #define ARM_EXIT_DISCARD(x)	x
 
@@ -104,9 +106,18 @@
 	_edata = .;
 
 	BSS_SECTION(0, 0, 0)
+
+	. = ALIGN(PAGE_SIZE);
+	idmap_pg_dir = .;
+	. += IDMAP_DIR_SIZE;
+	swapper_pg_dir = .;
+	. += SWAPPER_DIR_SIZE;
+
 	_end = .;
 
 	STABS_DEBUG
+
+	HEAD_SYMBOLS
 }
 
 /*
@@ -114,3 +125,8 @@
  */
 ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
        "HYP init code too big")
+
+/*
+ * If padding is applied before .head.text, virt<->phys conversions will fail.
+ */
+ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index bcc965e..41cb6d3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -62,6 +62,7 @@
 			break;
 
 		pud = pud_offset(pgd, addr);
+		printk(", *pud=%016llx", pud_val(*pud));
 		if (pud_none(*pud) || pud_bad(*pud))
 			break;
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index e90c542..5b4526e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -137,20 +138,16 @@
 {
 	phys_addr_t dma_phys_limit = 0;
 
-	/* Register the kernel text, kernel data and initrd with memblock */
+	/*
+	 * Register the kernel text, kernel data, initrd, and initial
+	 * pagetables with memblock.
+	 */
 	memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start)
 		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-	/*
-	 * Reserve the page tables.  These are already in use,
-	 * and can only be in node 0.
-	 */
-	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
-	memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
-
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
@@ -269,26 +266,33 @@
 
 #define MLK(b, t) b, t, ((t) - (b)) >> 10
 #define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLG(b, t) b, t, ((t) - (b)) >> 30
 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
 	pr_notice("Virtual kernel memory layout:\n"
-		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld MB)\n"
+		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld MB)\n"
+		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
+		  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
+		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
+		  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
 		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
 		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-		  "      .init : 0x%p" " - 0x%p" "   (%6ld kB)\n"
-		  "      .text : 0x%p" " - 0x%p" "   (%6ld kB)\n"
-		  "      .data : 0x%p" " - 0x%p" "   (%6ld kB)\n",
-		  MLM(VMALLOC_START, VMALLOC_END),
+		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+		  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+		  MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
+		  MLG((unsigned long)vmemmap,
+		      (unsigned long)vmemmap + VMEMMAP_SIZE),
 		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
 		      (unsigned long)virt_to_page(high_memory)),
 #endif
+		  MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M),
+		  MLK(FIXADDR_START, FIXADDR_TOP),
 		  MLM(MODULES_VADDR, MODULES_END),
 		  MLM(PAGE_OFFSET, (unsigned long)high_memory),
-
 		  MLK_ROUNDUP(__init_begin, __init_end),
 		  MLK_ROUNDUP(_text, _etext),
 		  MLK_ROUNDUP(_sdata, _edata));
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 7ec3283..fa324bd 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -103,19 +103,28 @@
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-#ifndef CONFIG_ARM64_64K_PAGES
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 
-static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+static inline pud_t * __init early_ioremap_pud(unsigned long addr)
 {
 	pgd_t *pgd;
-	pud_t *pud;
 
 	pgd = pgd_offset_k(addr);
 	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-	pud = pud_offset(pgd, addr);
+	return pud_offset(pgd, addr);
+}
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	pud_t *pud = early_ioremap_pud(addr);
+
 	BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
 	return pmd_offset(pud, addr);
@@ -132,13 +141,18 @@
 
 void __init early_ioremap_init(void)
 {
+	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
+	unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
 
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-#ifndef CONFIG_ARM64_64K_PAGES
-	/* need to populate pmd for 4k pagesize only */
+	pgd = pgd_offset_k(addr);
+	pgd_populate(&init_mm, pgd, bm_pud);
+	pud = pud_offset(pgd, addr);
+	pud_populate(&init_mm, pud, bm_pmd);
+	pmd = pmd_offset(pud, addr);
 	pmd_populate_kernel(&init_mm, pmd, bm_pte);
-#endif
+
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which
 	 * we are not prepared:
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c43f1dd..c555672 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,6 +32,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 #include <asm/mmu_context.h>
 
 #include "mm.h"
@@ -204,9 +205,16 @@
 				  unsigned long end, unsigned long phys,
 				  int map_io)
 {
-	pud_t *pud = pud_offset(pgd, addr);
+	pud_t *pud;
 	unsigned long next;
 
+	if (pgd_none(*pgd)) {
+		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
+		pgd_populate(&init_mm, pgd, pud);
+	}
+	BUG_ON(pgd_bad(*pgd));
+
+	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
 
@@ -290,10 +298,10 @@
 	 * memory addressable from the initial direct kernel mapping.
 	 *
 	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+	 * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
 	 * aligned to 2MB as per Documentation/arm64/booting.txt).
 	 */
-	limit = PHYS_OFFSET + PGDIR_SIZE;
+	limit = PHYS_OFFSET + PUD_SIZE;
 	memblock_set_current_limit(limit);
 
 	/* map all the memory banks */
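
With more than two page-table levels, the pgd entry covering a region may be empty, so the hunk above teaches alloc_init_pud() to allocate and install a pud table on demand rather than assuming swapper_pg_dir already covers it. The shape of that populate-on-demand step, reduced to a toy model (types and helpers are illustrative; the real code uses early_alloc() and pgd_populate()):

    #include <stdlib.h>

    /* Toy model: if the upper-level entry is empty, allocate a backing
     * table and install it before descending to the next level. */
    struct entry { void *table; };

    static void *descend(struct entry *e, size_t table_size)
    {
            if (!e->table)
                    e->table = calloc(1, table_size);
            return e->table;
    }
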
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
index 972adcc..941593c 100644
--- a/arch/avr32/include/asm/processor.h
+++ b/arch/avr32/include/asm/processor.h
@@ -92,6 +92,7 @@
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
 #define cpu_relax()		barrier()
+#define cpu_relax_lowlatency()        cpu_relax()
 #define cpu_sync_pipeline()	asm volatile("sub pc, -2" : : : "memory")
 
 struct cpu_context {
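
This is the first of many identical hunks in this series: each architecture gains cpu_relax_lowlatency(), defined as plain cpu_relax() wherever no cheaper hint exists, for spin loops that prioritise wake-up latency (such as optimistic spinning). A user-space sketch of the kind of polling loop the helper serves, with a compiler barrier standing in for cpu_relax() (identifiers are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Stand-in for cpu_relax_lowlatency(): on most architectures in
     * this series it is just a compiler barrier. */
    #define cpu_relax_lowlatency() atomic_signal_fence(memory_order_seq_cst)

    static void spin_until_set(atomic_bool *flag)
    {
            while (!atomic_load_explicit(flag, memory_order_acquire))
                    cpu_relax_lowlatency();  /* hint, then poll again */
    }
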
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f81e7b9..ed30699 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -18,7 +18,6 @@
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_IDE
 	select HAVE_KERNEL_GZIP if RAMKERNEL
 	select HAVE_KERNEL_BZIP2 if RAMKERNEL
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index d0e72e9..7acd466 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -99,7 +99,7 @@
 #define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
 #define cpu_relax()    	smp_mb()
-
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Get the Silicon Revision of the chip */
 static inline uint32_t __pure bfin_revid(void)
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index 7eed00b..28d0595 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -33,15 +33,6 @@
  * function will be waiting there.  mmmm pie.
  */
 ENTRY(_ftrace_caller)
-# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/* optional micro optimization: return if stopped */
-	p1.l = _function_trace_stop;
-	p1.h = _function_trace_stop;
-	r3 = [p1];
-	cc = r3 == 0;
-	if ! cc jump _ftrace_stub (bp);
-# endif
-
 	/* save first/second/third function arg and the return register */
 	[--sp] = r2;
 	[--sp] = r0;
@@ -83,15 +74,6 @@
 
 /* See documentation for _ftrace_caller */
 ENTRY(__mcount)
-# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/* optional micro optimization: return if stopped */
-	p1.l = _function_trace_stop;
-	p1.h = _function_trace_stop;
-	r3 = [p1];
-	cc = r3 == 0;
-	if ! cc jump _ftrace_stub (bp);
-# endif
-
 	/* save third function arg early so we can do testing below */
 	[--sp] = r2;
 
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 974e554..ea20320 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -389,14 +389,6 @@
 	if (attr->exclude_hv || attr->exclude_idle)
 		return -EPERM;
 
-	/*
-	 * All of the on-chip counters are "limited", in that they have
-	 * no interrupts, and are therefore unable to do sampling without
-	 * further work and timer assistance.
-	 */
-	if (hwc->sample_period)
-		return -EINVAL;
-
 	ret = 0;
 	switch (attr->type) {
 	case PERF_TYPE_RAW:
@@ -490,6 +482,13 @@
 {
 	int ret;
 
+	/*
+	 * All of the on-chip counters are "limited", in that they have
+	 * no interrupts, and are therefore unable to do sampling without
+	 * further work and timer assistance.
+	 */
+	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	if (!ret)
 		perf_cpu_notifier(bfin_pmu_notifier);
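
Rather than each driver rejecting sampling events by hand in its event_init path, the hunk above declares the limitation once at registration and lets the perf core refuse sampling on interrupt-less PMUs centrally. A hedged sketch of that centralised check (names are illustrative, not the exact perf-core identifiers):

    #include <errno.h>

    #define PMU_CAP_NO_INTERRUPT    0x01    /* stand-in for the real flag */

    struct pmu_desc   { int capabilities; };
    struct event_attr { unsigned long long sample_period; };

    /* Sampling needs an overflow interrupt; refuse it once, centrally,
     * for any PMU that declared it has none. */
    static int check_sampling(const struct pmu_desc *pmu,
                              const struct event_attr *attr)
    {
            if (attr->sample_period &&
                (pmu->capabilities & PMU_CAP_NO_INTERRUPT))
                    return -EINVAL;
            return 0;
    }
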
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index b9eb3da..f2ef31b 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -121,6 +121,7 @@
 #define KSTK_ESP(task)	(task_pt_regs(task)->sp)
 
 #define cpu_relax()		do { } while (0)
+#define cpu_relax_lowlatency()        cpu_relax()
 
 extern const struct seq_operations cpuinfo_op;
 
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 15b815d..862126b 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -63,6 +63,7 @@
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 void default_idle(void);
 
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 45a8254..d850113 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -56,6 +56,7 @@
 }
 
 #define cpu_relax() __vmyield()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /*
  * Decides where the kernel will search for a free chunk of vm space during
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index efd1b92..c736713 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -548,6 +548,7 @@
 }
 
 #define cpu_relax()	ia64_hint(ia64_hint_pause)
+#define cpu_relax_lowlatency() cpu_relax()
 
 static inline int
 ia64_get_irr(unsigned int vector)
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
index 1fe9aa5..ec73b2c 100644
--- a/arch/ia64/pci/fixup.c
+++ b/arch/ia64/pci/fixup.c
@@ -6,6 +6,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/vgaarb.h>
+#include <linux/screen_info.h>
 
 #include <asm/machvec.h>
 
@@ -37,6 +38,27 @@
 		return;
 	/* Maybe, this machine supports legacy memory map. */
 
+	if (!vga_default_device()) {
+		resource_size_t start, end;
+		int i;
+
+		/* Does firmware framebuffer belong to us? */
+		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+			if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+				continue;
+
+			start = pci_resource_start(pdev, i);
+			end  = pci_resource_end(pdev, i);
+
+			if (!start || !end)
+				continue;
+
+			if (screen_info.lfb_base >= start &&
+			    (screen_info.lfb_base + screen_info.lfb_size) < end)
+				vga_set_default_device(pdev);
+		}
+	}
+
 	/* Is VGA routed to us? */
 	bus = pdev->bus;
 	while (bus) {
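
The new loop above claims a device as the default VGA device when the firmware framebuffer reported in screen_info lies wholly inside one of the device's memory BARs. The containment test, isolated for clarity (it mirrors the asymmetric >= / < comparison in the hunk):

    #include <stdbool.h>
    #include <stdint.h>

    /* Does the firmware framebuffer [lfb_base, lfb_base + lfb_size)
     * fall inside the BAR [start, end]? Mirrors the hunk above. */
    static bool fb_in_bar(uint64_t lfb_base, uint64_t lfb_size,
                          uint64_t start, uint64_t end)
    {
            return lfb_base >= start && lfb_base + lfb_size < end;
    }
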
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index cad775a..b2eb484 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -114,7 +114,7 @@
 		if (mode & BTE_USE_ANY) {
 			nasid_to_try[1] = my_nasid;
 		} else {
-			nasid_to_try[1] = (int)NULL;
+			nasid_to_try[1] = 0;
 		}
 	} else {
 		/* try local then remote */
@@ -122,7 +122,7 @@
 		if (mode & BTE_USE_ANY) {
 			nasid_to_try[1] = NASID_GET(dest);
 		} else {
-			nasid_to_try[1] = (int)NULL;
+			nasid_to_try[1] = 0;
 		}
 	}
 
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 53b01b8..36182c8 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -579,7 +579,7 @@
 		       (sn_prom_type == 1) ? "real" : "fake");
 	}
 
-	memset(pda, 0, sizeof(pda));
+	memset(pda, 0, sizeof(*pda));
 	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
 				&sn_hub_info->nasid_bitmask,
 				&sn_hub_info->nasid_shift,
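
The one-character fix above deserves a note: pda is a pointer, so sizeof(pda) is the pointer size (8 bytes on LP64) and the old memset cleared only the first few bytes of the per-node data area. A self-contained demonstration of the bug and the fix:

    #include <stdio.h>
    #include <string.h>

    struct pda { char scratch[128]; };

    int main(void)
    {
            struct pda instance;
            struct pda *pda = &instance;

            /* Bug: sizeof(pda) is the size of the pointer. */
            printf("sizeof(pda)  = %zu\n", sizeof(pda));
            /* Fix: sizeof(*pda) is the size of the object. */
            printf("sizeof(*pda) = %zu\n", sizeof(*pda));

            memset(pda, 0, sizeof(*pda));   /* clears all 128 bytes */
            return 0;
    }
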
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 5767367..9f8fd9b 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -133,5 +133,6 @@
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index b0768a6..20dda1d 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -176,5 +176,6 @@
 #define task_pt_regs(tsk)	((struct pt_regs *) ((tsk)->thread.esp0))
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #endif
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index f868506..0931388 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -12,10 +12,6 @@
 
 #include <asm/tlb.h>
 
-/* FIXME - when we get this compiling */
-/* erm, now that it's compiling, what do we do with it? */
-#define _KERNPG_TABLE 0
-
 extern const char bad_pmd_string[];
 
 #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 499b761..0b389a8 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -13,7 +13,6 @@
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index a8a3747..881071c 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -155,6 +155,7 @@
 #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
 
 #define cpu_relax()     barrier()
+#define cpu_relax_lowlatency()  cpu_relax()
 
 extern void setup_priv(void);
 
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S
index e70bff7..3acc288 100644
--- a/arch/metag/kernel/ftrace_stub.S
+++ b/arch/metag/kernel/ftrace_stub.S
@@ -16,13 +16,6 @@
 	.global _ftrace_caller
 	.type	_ftrace_caller,function
 _ftrace_caller:
-	MOVT    D0Re0,#HI(_function_trace_stop)
-	ADD	D0Re0,D0Re0,#LO(_function_trace_stop)
-	GETD	D0Re0,[D0Re0]
-	CMP	D0Re0,#0
-	BEQ	$Lcall_stub
-	MOV	PC,D0.4
-$Lcall_stub:
 	MSETL   [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
 	MOV     D1Ar1, D0.4
 	MOV     D0Ar2, D1RtP
@@ -42,13 +35,6 @@
 	.global	_mcount_wrapper
 	.type	_mcount_wrapper,function
 _mcount_wrapper:
-	MOVT    D0Re0,#HI(_function_trace_stop)
-	ADD	D0Re0,D0Re0,#LO(_function_trace_stop)
-	GETD	D0Re0,[D0Re0]
-	CMP	D0Re0,#0
-	BEQ	$Lcall_mcount
-	MOV	PC,D0.4
-$Lcall_mcount:
 	MSETL   [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
 	MOV     D1Ar1, D0.4
 	MOV     D0Ar2, D1RtP
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5cc4d4d..02c0873 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -568,16 +568,6 @@
 		return -EINVAL;
 
 	/*
-	 * Early cores have "limited" counters - they have no overflow
-	 * interrupts - and so are unable to do sampling without extra work
-	 * and timer assistance.
-	 */
-	if (metag_pmu->max_period == 0) {
-		if (hwc->sample_period)
-			return -EINVAL;
-	}
-
-	/*
 	 * Don't assign an index until the event is placed into the hardware.
 	 * -1 signifies that we're still deciding where to put it. On SMP
 	 * systems each core has its own set of counters, so we can't do any
@@ -866,6 +856,15 @@
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 			metag_pmu->name, metag_pmu->max_events);
 
+	/*
+	 * Early cores have "limited" counters - they have no overflow
+	 * interrupts - and so are unable to do sampling without extra work
+	 * and timer assistance.
+	 */
+	if (metag_pmu->max_period == 0) {
+		metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+	}
+
 	/* Initialise the active events and reservation mutex */
 	atomic_set(&metag_pmu->active_events, 0);
 	mutex_init(&metag_pmu->reserve_mutex);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 9ae0854..40e1c1d 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -22,7 +22,6 @@
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_TRACER
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 9d31b05..497a988 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -22,6 +22,7 @@
 extern const struct seq_operations cpuinfo_op;
 
 # define cpu_relax()		barrier()
+# define cpu_relax_lowlatency()	cpu_relax()
 
 #define task_pt_regs(tsk) \
 		(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index bbcd253..fc7b48a 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -27,6 +27,9 @@
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index fc1e132..fed9da5 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -91,11 +91,6 @@
 #endif /* CONFIG_DYNAMIC_FTRACE */
 	SAVE_REGS
 	swi	r15, r1, 0;
-	/* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */
-	lwi	r5, r0, function_trace_stop;
-	bneid	r5, end;
-	nop;
-	/* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #ifndef CONFIG_DYNAMIC_FTRACE
 	lwi	r5, r0, ftrace_graph_return;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4e238e6..10f270b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -15,7 +15,6 @@
 	select HAVE_BPF_JIT if !CPU_MICROMIPS
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_C_RECORDMCOUNT
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index b0aa955..7a3fc67 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -359,13 +359,17 @@
 #define MIPS3_PG_FRAME		0x3fffffc0
 
 #define VPN2_MASK		0xffffe000
-#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&	\
+#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&		\
 				 ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
-#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))	\
-				 ? ((x).tlb_lo1 & MIPS3_PG_V)	\
+#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))		\
+				 ? ((x).tlb_lo1 & MIPS3_PG_V)		\
 				 : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
+				 ((y) & VPN2_MASK & ~(x).tlb_mask))
+#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
+				 TLB_ASID(x) == ((y) & ASID_MASK))
 
 struct kvm_mips_tlb {
 	long tlb_mask;
@@ -760,7 +764,7 @@
 			       struct kvm_vcpu *vcpu);
 
 /* Misc */
-extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
 
 
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ad70cba..d5098bc 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -367,6 +367,7 @@
 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /*
  * Return_address is a replacement for __builtin_return_address(count)
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 0b8bd28..4520adc 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -19,6 +19,9 @@
 #include <asm/mipsmtregs.h>
 #include <asm/uaccess.h> /* for segment_eq() */
 
+extern void (*r4k_blast_dcache)(void);
+extern void (*r4k_blast_icache)(void);
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 60e7e5e..8b65387 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -302,6 +302,9 @@
 	    &return_to_handler;
 	int faulted, insns;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 539b629..00940d1 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -74,10 +74,6 @@
 #endif
 
 	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
-	lw	t1, function_trace_stop
-	bnez	t1, ftrace_stub
-	 nop
-
 	MCOUNT_SAVE_REGS
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
 	PTR_S	MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
@@ -105,9 +101,6 @@
 #else	/* ! CONFIG_DYNAMIC_FTRACE */
 
 NESTED(_mcount, PT_SIZE, ra)
-	lw	t1, function_trace_stop
-	bnez	t1, ftrace_stub
-	 nop
 	PTR_LA	t1, ftrace_stub
 	PTR_L	t2, ftrace_trace_function /* Prepare t2 for (1) */
 	bne	t1, t2, static_trace
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 78d87bb..401fe02 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -5,9 +5,9 @@
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
 
-kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
-	    kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
-	    kvm_mips_dyntrans.o kvm_trap_emul.o
+kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+	    interrupt.o stats.o commpage.o \
+	    dyntrans.o trap_emul.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
-obj-y			+= kvm_cb.o kvm_tlb.o
+obj-y			+= callback.o tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/callback.c
similarity index 100%
rename from arch/mips/kvm/kvm_cb.c
rename to arch/mips/kvm/callback.c
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644
index 0000000..2d6e976
--- /dev/null
+++ b/arch/mips/kvm/commpage.c
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ 0x0.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+	/* Specific init values for fields */
+	vcpu->arch.cop0 = &page->cop0;
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644
index 0000000..08c5fa2
--- /dev/null
+++ b/arch/mips/kvm/commpage.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+	/* COP0 state is mapped into Guest kernel via commpage */
+	struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/dyntrans.c
similarity index 79%
rename from arch/mips/kvm/kvm_mips_dyntrans.c
rename to arch/mips/kvm/dyntrans.c
index b80e41d..521121b 100644
--- a/arch/mips/kvm/kvm_mips_dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -18,7 +18,7 @@
 #include <linux/bootmem.h>
 #include <asm/cacheflush.h>
 
-#include "kvm_mips_comm.h"
+#include "commpage.h"
 
 #define SYNCI_TEMPLATE  0x041f0000
 #define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
@@ -28,9 +28,8 @@
 #define CLEAR_TEMPLATE  0x00000020
 #define SW_TEMPLATE     0xac000000
 
-int
-kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-			   struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -47,12 +46,11 @@
 }
 
 /*
- *  Address based CACHE instructions are transformed into synci(s). A little heavy
- * for just D-cache invalidates, but avoids an expensive trap
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
  */
-int
-kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-			struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+			    struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -72,8 +70,7 @@
 	return result;
 }
 
-int
-kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mfc0_inst;
@@ -115,8 +112,7 @@
 	return 0;
 }
 
-int
-kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mtc0_inst = SW_TEMPLATE;
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/emulate.c
similarity index 83%
rename from arch/mips/kvm/kvm_mips_emul.c
rename to arch/mips/kvm/emulate.c
index 8d48400..fb3e8df 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/emulate.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Instruction/Exception emulation
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -29,9 +29,9 @@
 #include <asm/r4kcache.h>
 #define CONFIG_MIPS_MT
 
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
+#include "opcode.h"
+#include "interrupt.h"
+#include "commpage.h"
 
 #include "trace.h"
 
@@ -51,18 +51,14 @@
 	if (epc & 3)
 		goto unaligned;
 
-	/*
-	 * Read the instruction
-	 */
+	/* Read the instruction */
 	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
 
 	if (insn.word == KVM_INVALID_INST)
 		return KVM_INVALID_INST;
 
 	switch (insn.i_format.opcode) {
-		/*
-		 * jr and jalr are in r_format format.
-		 */
+		/* jr and jalr are in r_format format. */
 	case spec_op:
 		switch (insn.r_format.func) {
 		case jalr_op:
@@ -124,18 +120,16 @@
 
 			dspcontrol = rddsp(0x01);
 
-			if (dspcontrol >= 32) {
+			if (dspcontrol >= 32)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-			} else
+			else
 				epc += 8;
 			nextpc = epc;
 			break;
 		}
 		break;
 
-		/*
-		 * These are unconditional and in j_format.
-		 */
+		/* These are unconditional and in j_format. */
 	case jal_op:
 		arch->gprs[31] = instpc + 8;
 	case j_op:
@@ -146,9 +140,7 @@
 		nextpc = epc;
 		break;
 
-		/*
-		 * These are conditional and in i_format.
-		 */
+		/* These are conditional and in i_format. */
 	case beq_op:
 	case beql_op:
 		if (arch->gprs[insn.i_format.rs] ==
@@ -189,22 +181,20 @@
 		nextpc = epc;
 		break;
 
-		/*
-		 * And now the FPA/cp1 branch instructions.
-		 */
+		/* And now the FPA/cp1 branch instructions. */
 	case cop1_op:
-		printk("%s: unsupported cop1_op\n", __func__);
+		kvm_err("%s: unsupported cop1_op\n", __func__);
 		break;
 	}
 
 	return nextpc;
 
 unaligned:
-	printk("%s: unaligned epc\n", __func__);
+	kvm_err("%s: unaligned epc\n", __func__);
 	return nextpc;
 
 sigill:
-	printk("%s: DSP branch but not DSP ASE\n", __func__);
+	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
 	return nextpc;
 }
 
@@ -219,7 +209,8 @@
 			er = EMULATE_FAIL;
 		} else {
 			vcpu->arch.pc = branch_pc;
-			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+			kvm_debug("BD update_pc(): New PC: %#lx\n",
+				  vcpu->arch.pc);
 		}
 	} else
 		vcpu->arch.pc += 4;
@@ -240,6 +231,7 @@
 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 }
@@ -392,7 +384,6 @@
 	return now;
 }
 
-
 /**
  * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
  * @vcpu:	Virtual CPU.
@@ -760,8 +751,8 @@
 		kvm_clear_c0_guest_status(cop0, ST0_ERL);
 		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
-		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
-		       vcpu->arch.pc);
+		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+			vcpu->arch.pc);
 		er = EMULATE_FAIL;
 	}
 
@@ -770,8 +761,6 @@
 
 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 {
-	enum emulation_result er = EMULATE_DONE;
-
 	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
 		  vcpu->arch.pending_exceptions);
 
@@ -781,8 +770,9 @@
 		vcpu->arch.wait = 1;
 		kvm_vcpu_block(vcpu);
 
-		/* We we are runnable, then definitely go off to user space to check if any
-		 * I/O interrupts are pending.
+		/*
+		 * When we are runnable, go off to user space to
+		 * check if any I/O interrupts are pending.
 		 */
 		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
@@ -790,20 +780,20 @@
 		}
 	}
 
-	return er;
+	return EMULATE_DONE;
 }
 
-/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
- * this, if things ever change
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch this, if things ever change
  */
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	enum emulation_result er = EMULATE_FAIL;
 	uint32_t pc = vcpu->arch.pc;
 
-	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
-	return er;
+	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+	return EMULATE_FAIL;
 }
 
 /* Write Guest TLB Entry @ Index */
@@ -811,88 +801,76 @@
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	int index = kvm_read_c0_guest_index(cop0);
-	enum emulation_result er = EMULATE_DONE;
 	struct kvm_mips_tlb *tlb = NULL;
 	uint32_t pc = vcpu->arch.pc;
 
 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-		printk("%s: illegal index: %d\n", __func__, index);
-		printk
-		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-		     pc, index, kvm_read_c0_guest_entryhi(cop0),
-		     kvm_read_c0_guest_entrylo0(cop0),
-		     kvm_read_c0_guest_entrylo1(cop0),
-		     kvm_read_c0_guest_pagemask(cop0));
+		kvm_debug("%s: illegal index: %d\n", __func__, index);
+		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+			  pc, index, kvm_read_c0_guest_entryhi(cop0),
+			  kvm_read_c0_guest_entrylo0(cop0),
+			  kvm_read_c0_guest_entrylo1(cop0),
+			  kvm_read_c0_guest_pagemask(cop0));
 		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug
-	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
-	     kvm_read_c0_guest_pagemask(cop0));
+	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0),
+		  kvm_read_c0_guest_pagemask(cop0));
 
-	return er;
+	return EMULATE_DONE;
 }
 
 /* Write Guest TLB Entry @ Random Index */
 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	enum emulation_result er = EMULATE_DONE;
 	struct kvm_mips_tlb *tlb = NULL;
 	uint32_t pc = vcpu->arch.pc;
 	int index;
 
-#if 1
 	get_random_bytes(&index, sizeof(index));
 	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-#else
-	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
-#endif
-
-	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-		printk("%s: illegal index: %d\n", __func__, index);
-		return EMULATE_FAIL;
-	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug
-	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0),
-	     kvm_read_c0_guest_entrylo1(cop0));
+	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0));
 
-	return er;
+	return EMULATE_DONE;
 }
 
 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	long entryhi = kvm_read_c0_guest_entryhi(cop0);
-	enum emulation_result er = EMULATE_DONE;
 	uint32_t pc = vcpu->arch.pc;
 	int index = -1;
 
@@ -903,12 +881,12 @@
 	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
 		  index);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
-		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+					   uint32_t cause, struct kvm_run *run,
+					   struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
@@ -922,9 +900,8 @@
 	 */
 	curr_pc = vcpu->arch.pc;
 	er = update_pc(vcpu, cause);
-	if (er == EMULATE_FAIL) {
+	if (er == EMULATE_FAIL)
 		return er;
-	}
 
 	copz = (inst >> 21) & 0x1f;
 	rt = (inst >> 16) & 0x1f;
@@ -949,7 +926,7 @@
 			er = kvm_mips_emul_tlbp(vcpu);
 			break;
 		case rfe_op:
-			printk("!!!COP0_RFE!!!\n");
+			kvm_err("!!!COP0_RFE!!!\n");
 			break;
 		case eret_op:
 			er = kvm_mips_emul_eret(vcpu);
@@ -973,8 +950,7 @@
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 				kvm_mips_trans_mfc0(inst, opc, vcpu);
 #endif
-			}
-			else {
+			} else {
 				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -999,8 +975,8 @@
 			if ((rd == MIPS_CP0_TLB_INDEX)
 			    && (vcpu->arch.gprs[rt] >=
 				KVM_MIPS_GUEST_TLB_SIZE)) {
-				printk("Invalid TLB Index: %ld",
-				       vcpu->arch.gprs[rt]);
+				kvm_err("Invalid TLB Index: %ld",
+					vcpu->arch.gprs[rt]);
 				er = EMULATE_FAIL;
 				break;
 			}
@@ -1010,21 +986,19 @@
 				kvm_change_c0_guest_ebase(cop0,
 							  ~(C0_EBASE_CORE_MASK),
 							  vcpu->arch.gprs[rt]);
-				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
-				       kvm_read_c0_guest_ebase(cop0));
+				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
+					kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				uint32_t nasid =
-				    vcpu->arch.gprs[rt] & ASID_MASK;
-				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
-				    &&
+					vcpu->arch.gprs[rt] & ASID_MASK;
+				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
 				    ((kvm_read_c0_guest_entryhi(cop0) &
 				      ASID_MASK) != nasid)) {
-
-					kvm_debug
-					    ("MTCz, change ASID from %#lx to %#lx\n",
-					     kvm_read_c0_guest_entryhi(cop0) &
-					     ASID_MASK,
-					     vcpu->arch.gprs[rt] & ASID_MASK);
+					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+						kvm_read_c0_guest_entryhi(cop0)
+						& ASID_MASK,
+						vcpu->arch.gprs[rt]
+						& ASID_MASK);
 
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
@@ -1049,7 +1023,10 @@
 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
 				kvm_write_c0_guest_status(cop0,
 							  vcpu->arch.gprs[rt]);
-				/* Make sure that CU1 and NMI bits are never set */
+				/*
+				 * Make sure that CU1 and NMI bits are
+				 * never set
+				 */
 				kvm_clear_c0_guest_status(cop0,
 							  (ST0_CU1 | ST0_NMI));
 
@@ -1058,6 +1035,7 @@
 #endif
 			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
 				uint32_t old_cause, new_cause;
+
 				old_cause = kvm_read_c0_guest_cause(cop0);
 				new_cause = vcpu->arch.gprs[rt];
 				/* Update R/W bits */
@@ -1082,9 +1060,8 @@
 			break;
 
 		case dmtc_op:
-			printk
-			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
-			     vcpu->arch.pc, rt, rd, sel);
+			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+				vcpu->arch.pc, rt, rd, sel);
 			er = EMULATE_FAIL;
 			break;
 
@@ -1115,7 +1092,10 @@
 				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
 				uint32_t pss =
 				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
-				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+				/*
+				 * We don't support any shadow register sets, so
+				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
+				 */
 				if (css || pss) {
 					er = EMULATE_FAIL;
 					break;
@@ -1126,21 +1106,17 @@
 			}
 			break;
 		default:
-			printk
-			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-			     vcpu->arch.pc, copz);
+			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+				vcpu->arch.pc, copz);
 			er = EMULATE_FAIL;
 			break;
 		}
 	}
 
 done:
-	/*
-	 * Rollback PC only if emulation was unsuccessful
-	 */
-	if (er == EMULATE_FAIL) {
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-	}
 
 dont_update_pc:
 	/*
@@ -1152,9 +1128,9 @@
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
 	int32_t op, base, rt, offset;
@@ -1252,24 +1228,21 @@
 		break;
 
 	default:
-		printk("Store not yet supported");
+		kvm_err("Store not yet supported");
 		er = EMULATE_FAIL;
 		break;
 	}
 
-	/*
-	 * Rollback PC if emulation was unsuccessful
-	 */
-	if (er == EMULATE_FAIL) {
+	/* Rollback PC if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
-		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
 	int32_t op, base, rt, offset;
@@ -1364,7 +1337,7 @@
 		break;
 
 	default:
-		printk("Load not yet supported");
+		kvm_err("Load not yet supported");
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1383,7 +1356,7 @@
 	gfn = va >> PAGE_SHIFT;
 
 	if (gfn >= kvm->arch.guest_pmap_npages) {
-		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		return -1;
@@ -1391,7 +1364,8 @@
 	pfn = kvm->arch.guest_pmap[gfn];
 	pa = (pfn << PAGE_SHIFT) | offset;
 
-	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
+		  CKSEG0ADDR(pa));
 
 	local_flush_icache_range(CKSEG0ADDR(pa), 32);
 	return 0;
@@ -1410,13 +1384,12 @@
 #define MIPS_CACHE_DCACHE               0x1
 #define MIPS_CACHE_SEC                  0x3
 
-enum emulation_result
-kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	extern void (*r4k_blast_dcache) (void);
-	extern void (*r4k_blast_icache) (void);
 	enum emulation_result er = EMULATE_DONE;
 	int32_t offset, cache, op_inst, op, base;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1443,22 +1416,23 @@
 	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 		  cache, op, base, arch->gprs[base], offset);
 
-	/* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate
-	 * the caches entirely by stepping through all the ways/indexes
+	/*
+	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
+	 * invalidate the caches entirely by stepping through all the
+	 * ways/indexes
 	 */
 	if (op == MIPS_CACHE_OP_INDEX_INV) {
-		kvm_debug
-		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
-		     arch->gprs[base], offset);
+		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+			  arch->gprs[base], offset);
 
 		if (cache == MIPS_CACHE_DCACHE)
 			r4k_blast_dcache();
 		else if (cache == MIPS_CACHE_ICACHE)
 			r4k_blast_icache();
 		else {
-			printk("%s: unsupported CACHE INDEX operation\n",
-			       __func__);
+			kvm_err("%s: unsupported CACHE INDEX operation\n",
+				__func__);
 			return EMULATE_FAIL;
 		}
 
@@ -1470,21 +1444,19 @@
 
 	preempt_disable();
 	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-
-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
 			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
-		}
 	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
 		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
 		int index;
 
 		/* If an entry already exists then skip */
-		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
 			goto skip_fault;
-		}
 
-		/* If address not in the guest TLB, then give the guest a fault, the
-		 * resulting handler will do the right thing
+		/*
+		 * If address not in the guest TLB, then give the guest a fault,
+		 * the resulting handler will do the right thing
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
 						  (kvm_read_c0_guest_entryhi
@@ -1499,23 +1471,28 @@
 			goto dont_update_pc;
 		} else {
 			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-			/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+			/*
+			 * Check if the entry is valid, if not then setup a TLB
+			 * invalid exception to the guest
+			 */
 			if (!TLB_IS_VALID(*tlb, va)) {
 				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
 								run, vcpu);
 				preempt_enable();
 				goto dont_update_pc;
 			} else {
-				/* We fault an entry from the guest tlb to the shadow host TLB */
+				/*
+				 * We fault an entry from the guest tlb to the
+				 * shadow host TLB
+				 */
 				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
 								     NULL,
 								     NULL);
 			}
 		}
 	} else {
-		printk
-		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     cache, op, base, arch->gprs[base], offset);
+		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
 		goto dont_update_pc;
@@ -1530,7 +1507,10 @@
 		flush_dcache_line(va);
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
-		/* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */
+		/*
+		 * Replace the CACHE instruction, with a SYNCI, not the same,
+		 * but avoids a trap
+		 */
 		kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
 	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
@@ -1542,9 +1522,8 @@
 		kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
 	} else {
-		printk
-		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     cache, op, base, arch->gprs[base], offset);
+		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
 		goto dont_update_pc;
@@ -1552,28 +1531,23 @@
 
 	preempt_enable();
 
-      dont_update_pc:
-	/*
-	 * Rollback PC
-	 */
+dont_update_pc:
+	/* Rollback PC */
 	vcpu->arch.pc = curr_pc;
-      done:
+done:
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
-		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 	uint32_t inst;
 
-	/*
-	 *  Fetch the instruction.
-	 */
-	if (cause & CAUSEF_BD) {
+	/* Fetch the instruction. */
+	if (cause & CAUSEF_BD)
 		opc += 1;
-	}
 
 	inst = kvm_get_inst(opc, vcpu);
 
@@ -1601,8 +1575,8 @@
 		break;
 
 	default:
-		printk("Instruction emulation not supported (%p/%#x)\n", opc,
-		       inst);
+		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+			inst);
 		kvm_arch_vcpu_dump_regs(vcpu);
 		er = EMULATE_FAIL;
 		break;
@@ -1611,9 +1585,10 @@
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
-			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1638,20 +1613,20 @@
 		arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
 	} else {
-		printk("Trying to deliver SYSCALL when EXL is already set\n");
+		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
-			    struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.  host_cp0_badvaddr & VPN2_MASK) |
 				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
@@ -1688,16 +1663,16 @@
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
-			   struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+						 uint32_t *opc,
+						 struct kvm_run *run,
+						 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi =
 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
@@ -1734,16 +1709,16 @@
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
-			    struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
@@ -1778,16 +1753,16 @@
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
-			   struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+						 uint32_t *opc,
+						 struct kvm_run *run,
+						 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
@@ -1822,13 +1797,13 @@
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
 /* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result
-kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 #ifdef DEBUG
@@ -1837,9 +1812,7 @@
 				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 	int index;
 
-	/*
-	 * If address not in the guest TLB, then we are in trouble
-	 */
+	/* If address not in the guest TLB, then we are in trouble */
 	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 	if (index < 0) {
 		/* XXXKYMA Invalidate and retry */
@@ -1856,15 +1829,15 @@
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1895,16 +1868,16 @@
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
-			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1924,12 +1897,13 @@
 				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
 	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1961,9 +1935,10 @@
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1988,16 +1963,14 @@
 		arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
 	} else {
-		printk("Trying to deliver BP when EXL is already set\n");
+		kvm_err("Trying to deliver BP when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
 	return er;
 }
 
-/*
- * ll/sc, rdhwr, sync emulation
- */
+/* ll/sc, rdhwr, sync emulation */
 
 #define OPCODE 0xfc000000
 #define BASE   0x03e00000
@@ -2012,9 +1985,9 @@
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
-enum emulation_result
-kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
-		   struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+					 struct kvm_run *run,
+					 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -2031,16 +2004,14 @@
 	if (er == EMULATE_FAIL)
 		return er;
 
-	/*
-	 *  Fetch the instruction.
-	 */
+	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
 
 	inst = kvm_get_inst(opc, vcpu);
 
 	if (inst == KVM_INVALID_INST) {
-		printk("%s: Cannot get inst @ %p\n", __func__, opc);
+		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
 		return EMULATE_FAIL;
 	}
 
@@ -2099,15 +2070,15 @@
 	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
 }
 
-enum emulation_result
-kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+						  struct kvm_run *run)
 {
 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long curr_pc;
 
 	if (run->mmio.len > sizeof(*gpr)) {
-		printk("Bad MMIO length: %d", run->mmio.len);
+		kvm_err("Bad MMIO length: %d", run->mmio.len);
 		er = EMULATE_FAIL;
 		goto done;
 	}
@@ -2142,18 +2113,18 @@
 	}
 
 	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug
-		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-		     vcpu->mmio_needed);
+		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+			  vcpu->mmio_needed);
 
 done:
 	return er;
 }
 
-static enum emulation_result
-kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
-		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
 {
 	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2181,16 +2152,17 @@
 			  exccode, kvm_read_c0_guest_epc(cop0),
 			  kvm_read_c0_guest_badvaddr(cop0));
 	} else {
-		printk("Trying to deliver EXC when EXL is already set\n");
+		kvm_err("Trying to deliver EXC when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
-			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2215,10 +2187,13 @@
 			break;
 
 		case T_TLB_LD_MISS:
-			/* We we are accessing Guest kernel space, then send an address error exception to the guest */
+			/*
+			 * If we are accessing Guest kernel space, then send an
+			 * address error exception to the guest
+			 */
 			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-				printk("%s: LD MISS @ %#lx\n", __func__,
-				       badvaddr);
+				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
+					  badvaddr);
 				cause &= ~0xff;
 				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
 				er = EMULATE_PRIV_FAIL;
@@ -2226,10 +2201,13 @@
 			break;
 
 		case T_TLB_ST_MISS:
-			/* We we are accessing Guest kernel space, then send an address error exception to the guest */
+			/*
+			 * If we are accessing Guest kernel space, then send an
+			 * address error exception to the guest
+			 */
 			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-				printk("%s: ST MISS @ %#lx\n", __func__,
-				       badvaddr);
+				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
+					  badvaddr);
 				cause &= ~0xff;
 				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
 				er = EMULATE_PRIV_FAIL;
@@ -2237,8 +2215,8 @@
 			break;
 
 		case T_ADDR_ERR_ST:
-			printk("%s: address error ST @ %#lx\n", __func__,
-			       badvaddr);
+			kvm_debug("%s: address error ST @ %#lx\n", __func__,
+				  badvaddr);
 			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
 				cause &= ~0xff;
 				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
@@ -2246,8 +2224,8 @@
 			er = EMULATE_PRIV_FAIL;
 			break;
 		case T_ADDR_ERR_LD:
-			printk("%s: address error LD @ %#lx\n", __func__,
-			       badvaddr);
+			kvm_debug("%s: address error LD @ %#lx\n", __func__,
+				  badvaddr);
 			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
 				cause &= ~0xff;
 				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
@@ -2260,21 +2238,23 @@
 		}
 	}
 
-	if (er == EMULATE_PRIV_FAIL) {
+	if (er == EMULATE_PRIV_FAIL)
 		kvm_mips_emulate_exc(cause, opc, run, vcpu);
-	}
+
 	return er;
 }
 
-/* User Address (UA) fault, this could happen if
+/*
+ * User Address (UA) fault, this could happen if
  * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
  *     case we pass on the fault to the guest kernel and let it handle it.
  * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
  *     case we inject the TLB from the Guest TLB into the shadow host TLB
  */
-enum emulation_result
-kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2284,10 +2264,11 @@
 	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
 		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
 
-	/* KVM would not have got the exception if this entry was valid in the shadow host TLB
-	 * Check the Guest TLB, if the entry is not there then send the guest an
-	 * exception. The guest exc handler should then inject an entry into the
-	 * guest TLB
+	/*
+	 * KVM would not have got the exception if this entry was valid in the
+	 * shadow host TLB. Check the Guest TLB; if the entry is not there, then
+	 * send the guest an exception. The guest exc handler should then inject
+	 * an entry into the guest TLB.
 	 */
 	index = kvm_mips_guest_tlb_lookup(vcpu,
 					  (va & VPN2_MASK) |
@@ -2299,13 +2280,17 @@
 		} else if (exccode == T_TLB_ST_MISS) {
 			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
 		} else {
-			printk("%s: invalid exc code: %d\n", __func__, exccode);
+			kvm_err("%s: invalid exc code: %d\n", __func__,
+				exccode);
 			er = EMULATE_FAIL;
 		}
 	} else {
 		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
 
-		/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+		/*
+		 * Check if the entry is valid, if not then setup a TLB invalid
+		 * exception to the guest
+		 */
 		if (!TLB_IS_VALID(*tlb, va)) {
 			if (exccode == T_TLB_LD_MISS) {
 				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
@@ -2314,15 +2299,17 @@
 				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
 								vcpu);
 			} else {
-				printk("%s: invalid exc code: %d\n", __func__,
-				       exccode);
+				kvm_err("%s: invalid exc code: %d\n", __func__,
+					exccode);
 				er = EMULATE_FAIL;
 			}
 		} else {
-			kvm_debug
-			    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
-			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
-			/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+			/*
+			 * OK we have a Guest TLB entry, now inject it into the
+			 * shadow host TLB
+			 */
 			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
 							     NULL);
 		}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/interrupt.c
similarity index 85%
rename from arch/mips/kvm/kvm_mips_int.c
rename to arch/mips/kvm/interrupt.c
index 1e5de16..9b44459 100644
--- a/arch/mips/kvm/kvm_mips_int.c
+++ b/arch/mips/kvm/interrupt.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Interrupt delivery
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupt delivery
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -20,7 +20,7 @@
 
 #include <linux/kvm_host.h>
 
-#include "kvm_mips_int.h"
+#include "interrupt.h"
 
 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
 {
@@ -34,7 +34,8 @@
 
 void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
 {
-	/* Cause bits to reflect the pending timer interrupt,
+	/*
+	 * Cause bits to reflect the pending timer interrupt,
 	 * the EXC code will be set when we are actually
 	 * delivering the interrupt:
 	 */
@@ -51,12 +52,13 @@
 	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
 }
 
-void
-kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+			      struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
 
-	/* Cause bits to reflect the pending IO interrupt,
+	/*
+	 * Cause bits to reflect the pending IO interrupt,
 	 * the EXC code will be set when we are actually
 	 * delivering the interrupt:
 	 */
@@ -83,11 +85,11 @@
 
 }
 
-void
-kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-			   struct kvm_mips_interrupt *irq)
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+				struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
+
 	switch (intr) {
 	case -2:
 		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
@@ -111,9 +113,8 @@
 }
 
 /* Deliver the interrupt of the corresponding priority, if possible. */
-int
-kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-			uint32_t cause)
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			    uint32_t cause)
 {
 	int allowed = 0;
 	uint32_t exccode;
@@ -164,7 +165,6 @@
 
 	/* Are we allowed to deliver the interrupt ??? */
 	if (allowed) {
-
 		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 			/* save old pc */
 			kvm_write_c0_guest_epc(cop0, arch->pc);
@@ -195,9 +195,8 @@
 	return allowed;
 }
 
-int
-kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-		      uint32_t cause)
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			  uint32_t cause)
 {
 	return 1;
 }
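
The deliver/clear callbacks above are driven by a priority walk over the
vcpu's pending-exceptions bitmask. A sketch of the assumed caller shape (not
the literal upstream loop):

	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority = find_first_bit(pending, MIPS_EXC_MAX);

	while (priority < MIPS_EXC_MAX) {
		if (kvm_mips_irq_deliver_cb(vcpu, priority, cause))
			clear_bit(priority, pending);	/* delivered, drop it */
		priority = find_next_bit(pending, MIPS_EXC_MAX, priority + 1);
	}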
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/interrupt.h
similarity index 74%
rename from arch/mips/kvm/kvm_mips_int.h
rename to arch/mips/kvm/interrupt.h
index 20da7d2..4ab4bdf 100644
--- a/arch/mips/kvm/kvm_mips_int.h
+++ b/arch/mips/kvm/interrupt.h
@@ -1,14 +1,15 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Interrupts
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupts
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
-/* MIPS Exception Priorities, exceptions (including interrupts) are queued up
+/*
+ * MIPS Exception Priorities: exceptions (including interrupts) are queued up
  * for the guest in the order specified by their priorities
  */
 
@@ -27,6 +28,9 @@
 #define MIPS_EXC_MAX                12
 /* XXXSL More to follow */
 
+extern char mips32_exception[], mips32_exceptionEnd[];
+extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+
 #define C_TI        (_ULCAST_(1) << 30)
 
 #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
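
Because the priorities double as bit positions, queueing and dequeueing an
exception are single bit operations on the vcpu. A minimal sketch, assuming
the pending_exceptions field used in interrupt.c:

	static inline void queue_exc(struct kvm_vcpu *vcpu, unsigned int priority)
	{
		set_bit(priority, &vcpu->arch.pending_exceptions);
	}

	static inline void dequeue_exc(struct kvm_vcpu *vcpu, unsigned int priority)
	{
		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}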
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
deleted file mode 100644
index a4a8c85..0000000
--- a/arch/mips/kvm/kvm_mips_comm.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: commpage: mapped into get kernel space
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#ifndef __KVM_MIPS_COMMPAGE_H__
-#define __KVM_MIPS_COMMPAGE_H__
-
-struct kvm_mips_commpage {
-	struct mips_coproc cop0;	/* COP0 state is mapped into Guest kernel via commpage */
-};
-
-#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
-
-extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
-
-#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
deleted file mode 100644
index 3873b1e..0000000
--- a/arch/mips/kvm/kvm_mips_commpage.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* commpage, currently used for Virtual COP0 registers.
-* Mapped into the guest kernel @ 0x0.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_comm.h"
-
-void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
-	memset(page, 0, sizeof(struct kvm_mips_commpage));
-
-	/* Specific init values for fields */
-	vcpu->arch.cop0 = &page->cop0;
-	memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
-
-	return;
-}
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
deleted file mode 100644
index 86d3b4c..0000000
--- a/arch/mips/kvm/kvm_mips_opcode.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-/*
- * Define opcode values not defined in <asm/isnt.h>
- */
-
-#ifndef __KVM_MIPS_OPCODE_H__
-#define __KVM_MIPS_OPCODE_H__
-
-/* COP0 Ops */
-#define     mfmcz_op         0x0b	/*  01011  */
-#define     wrpgpr_op        0x0e	/*  01110  */
-
-/*  COP0 opcodes (only if COP0 and CO=1):  */
-#define     wait_op               0x20	/*  100000  */
-
-#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/locore.S
similarity index 93%
rename from arch/mips/kvm/kvm_locore.S
rename to arch/mips/kvm/locore.S
index 033ac34..d7279c0 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/locore.S
@@ -16,7 +16,6 @@
 #include <asm/stackframe.h>
 #include <asm/asm-offsets.h>
 
-
 #define _C_LABEL(x)     x
 #define MIPSX(name)     mips32_ ## name
 #define CALLFRAME_SIZ   32
@@ -91,7 +90,10 @@
 	LONG_S	$24, PT_R24(k1)
 	LONG_S	$25, PT_R25(k1)
 
-	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+	/*
+	 * XXXKYMA k0/k1 not saved, not being used if we got here through
+	 * an ioctl()
+	 */
 
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
@@ -132,7 +134,10 @@
 	/* Save the kernel gp as well */
 	LONG_S	gp, VCPU_HOST_GP(k1)
 
-	/* Setup status register for running the guest in UM, interrupts are disabled */
+	/*
+	 * Setup status register for running the guest in UM, interrupts
+	 * are disabled
+	 */
 	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
 	mtc0	k0, CP0_STATUS
 	ehb
@@ -152,7 +157,6 @@
 	mtc0	k0, CP0_STATUS
 	ehb
 
-
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
@@ -165,7 +169,7 @@
 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
-	     /* t1: contains the base of the ASID array, need to get the cpu id  */
+	/* t1: contains the base of the ASID array; need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)             /* smp_processor_id */
 	INT_SLL	t2, t2, 2                   /* x4 */
 	REG_ADDU t3, t1, t2
@@ -229,9 +233,7 @@
 	eret
 
 VECTOR(MIPSX(exception), unknown)
-/*
- * Find out what mode we came from and jump to the proper handler.
- */
+/* Find out what mode we came from and jump to the proper handler. */
 	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
 	ehb				#02:
 
@@ -239,7 +241,8 @@
 	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
 	INT_SLL	k0, k0, 10		#04
 	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
-	INT_ADDIU k0, k0, 0x2000		#06: Exception handler is installed @ offset 0x2000
+	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
+					#    installed @ offset 0x2000
 	j	k0			#07: jump to the function
 	 nop				#08: branch delay slot
 VECTOR_END(MIPSX(exceptionEnd))
@@ -248,7 +251,6 @@
 /*
  * Generic Guest exception handler. We end up here when the guest
  * does something that causes a trap to kernel mode.
- *
  */
 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Get the VCPU pointer from DDTATA_LO */
@@ -290,9 +292,7 @@
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)
 
-	/* We need to save hi/lo and restore them on
-	 * the way out
-	 */
+	/* We need to save hi/lo and restore them on the way out */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
 
@@ -321,8 +321,10 @@
 	/* Save pointer to run in s0, will be saved by the compiler */
 	move	s0, a0
 
-	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
-	 * process the exception */
+	/*
+	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception
+	 */
 	mfc0	k0,CP0_EPC
 	LONG_S	k0, VCPU_PC(k1)
 
@@ -351,7 +353,6 @@
 	LONG_L	k0, VCPU_HOST_EBASE(k1)
 	mtc0	k0,CP0_EBASE
 
-
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
@@ -369,7 +370,8 @@
 	/* Saved host state */
 	INT_ADDIU sp, sp, -PT_SIZE
 
-	/* XXXKYMA do we need to load the host ASID, maybe not because the
+	/*
+	 * XXXKYMA do we need to load the host ASID, maybe not because the
 	 * kernel entries are marked GLOBAL, need to verify
 	 */
 
@@ -383,9 +385,11 @@
 
 	/* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-	/* XXXKYMA: not sure if this is safe, how large is the stack??
+	/*
+	 * XXXKYMA: not sure if this is safe, how large is the stack??
 	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
-	 * with this in the kernel */
+	 * with this in the kernel
+	 */
 	PTR_LA	t9, kvm_mips_handle_exit
 	jalr.hb	t9
 	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
@@ -394,7 +398,8 @@
 	di
 	ehb
 
-	/* XXXKYMA: k0/k1 could have been blown away if we processed
+	/*
+	 * XXXKYMA: k0/k1 could have been blown away if we processed
 	 * an exception while we were handling the exception from the
 	 * guest, reload k1
 	 */
@@ -402,7 +407,8 @@
 	move	k1, s1
 	INT_ADDIU k1, k1, VCPU_HOST_ARCH
 
-	/* Check return value, should tell us if we are returning to the
+	/*
+	 * Check return value, should tell us if we are returning to the
 	 * host (handle I/O etc)or resuming the guest
 	 */
 	andi	t0, v0, RESUME_HOST
@@ -521,8 +527,10 @@
 	LONG_L	$0, PT_R0(k1)
 	LONG_L	$1, PT_R1(k1)
 
-	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
-	 * to recover the err code  */
+	/*
+	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code
+	 */
 	INT_SRA	k0, v0, 2
 	move	$2, k0
 
@@ -566,7 +574,6 @@
 	PTR_LI	k0, 0x2000000F
 	mtc0	k0,  CP0_HWRENA
 
-
 	/* Restore RA, which is the address we will return to */
 	LONG_L  ra, PT_R31(k1)
 	j       ra
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/mips.c
similarity index 83%
rename from arch/mips/kvm/kvm_mips.c
rename to arch/mips/kvm/mips.c
index f3c56a1..4fda672 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/mips.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -21,8 +21,8 @@
 
 #include <linux/kvm_host.h>
 
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
+#include "interrupt.h"
+#include "commpage.h"
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -31,38 +31,41 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif
 
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits) },
-	{ "cache", VCPU_STAT(cache_exits) },
-	{ "signal", VCPU_STAT(signal_exits) },
-	{ "interrupt", VCPU_STAT(int_exits) },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
-	{ "syscall", VCPU_STAT(syscall_exits) },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
-	{ "break_inst", VCPU_STAT(break_inst_exits) },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
+	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
+	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
+	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
+	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
+	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
+	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
+	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
+	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
+	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
+	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
 	{NULL}
 };
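
The VCPU_STAT() change above works because the old macro expanded to two
initializers at once, which defeats column alignment; spelling KVM_STAT_VCPU
per row keeps each entry a plain initializer. For orientation, the rows fill
in an item of roughly this shape (field names assumed from the kvm_host.h of
this era):

	struct kvm_stats_debugfs_item {
		const char *name;		/* debugfs file name, e.g. "wait" */
		int offset;			/* offsetof() into struct kvm_vcpu */
		enum kvm_stat_kind kind;	/* KVM_STAT_VM or KVM_STAT_VCPU */
	};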
 
 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
+
 	for_each_possible_cpu(i) {
 		vcpu->arch.guest_kernel_asid[i] = 0;
 		vcpu->arch.guest_user_asid[i] = 0;
 	}
+
 	return 0;
 }
 
-/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
- * are "runnable" if interrupts are pending
+/*
+ * XXXKYMA: We are simulating a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
@@ -94,16 +97,17 @@
 
 void kvm_arch_check_processor_compat(void *rtn)
 {
-	int *r = (int *)rtn;
-	*r = 0;
-	return;
+	*(int *)rtn = 0;
 }
 
 static void kvm_mips_init_tlbs(struct kvm *kvm)
 {
 	unsigned long wired;
 
-	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+	/*
+	 * Add a wired entry to the TLB; it is used to map the commpage to
+	 * the Guest kernel
+	 */
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
 	mtc0_tlbw_hazard();
@@ -130,7 +134,6 @@
 		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
 	}
 
-
 	return 0;
 }
 
@@ -185,8 +188,8 @@
 	}
 }
 
-long
-kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+			unsigned long arg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -207,20 +210,20 @@
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                struct kvm_memory_slot *memslot,
-                                struct kvm_userspace_memory_region *mem,
-                                enum kvm_mr_change change)
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return 0;
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                struct kvm_userspace_memory_region *mem,
-                                const struct kvm_memory_slot *old,
-                                enum kvm_mr_change change)
+				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 	unsigned long npages = 0;
-	int i, err = 0;
+	int i;
 
 	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
 		  __func__, kvm, mem->slot, mem->guest_phys_addr,
@@ -238,21 +241,17 @@
 
 			if (!kvm->arch.guest_pmap) {
 				kvm_err("Failed to allocate guest PMAP");
-				err = -ENOMEM;
-				goto out;
+				return;
 			}
 
 			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
 				  npages, kvm->arch.guest_pmap);
 
 			/* Now setup the page table */
-			for (i = 0; i < npages; i++) {
+			for (i = 0; i < npages; i++)
 				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-			}
 		}
 	}
-out:
-	return;
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -270,8 +269,6 @@
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	extern char mips32_exception[], mips32_exceptionEnd[];
-	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 	int err, size, offset;
 	void *gebase;
 	int i;
@@ -290,14 +287,14 @@
 
 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
 
-	/* Allocate space for host mode exception handlers that handle
+	/*
+	 * Allocate space for host mode exception handlers that handle
 	 * guest mode exits
 	 */
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_veic || cpu_has_vint)
 		size = 0x200 + VECTORSPACING * 64;
-	} else {
+	else
 		size = 0x4000;
-	}
 
 	/* Save Linux EBASE */
 	vcpu->arch.host_ebase = (void *)read_c0_ebase();
@@ -345,7 +342,10 @@
 	local_flush_icache_range((unsigned long)gebase,
 				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
-	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+	/*
+	 * Allocate comm page for guest kernel; a TLB will be reserved for
+	 * mapping GVA @ 0xFFFF8000 to this page
+	 */
 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
 
 	if (!vcpu->arch.kseg0_commpage) {
@@ -392,9 +392,8 @@
 	kvm_arch_vcpu_free(vcpu);
 }
 
-int
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-				    struct kvm_guest_debug *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -431,8 +430,8 @@
 	return r;
 }
 
-int
-kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
 	struct kvm_vcpu *dvcpu = NULL;
@@ -459,23 +458,20 @@
 
 	dvcpu->arch.wait = 0;
 
-	if (waitqueue_active(&dvcpu->wq)) {
+	if (waitqueue_active(&dvcpu->wq))
 		wake_up_interruptible(&dvcpu->wq);
-	}
 
 	return 0;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
@@ -632,10 +628,12 @@
 	}
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
 		return put_user(v, uaddr64);
 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 		u32 v32 = (u32)v;
+
 		return put_user(v32, uaddr32);
 	} else {
 		return -EINVAL;
@@ -728,8 +726,8 @@
 	return 0;
 }
 
-long
-kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+			 unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
@@ -739,6 +737,7 @@
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
 		struct kvm_one_reg reg;
+
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 		if (ioctl == KVM_SET_ONE_REG)
@@ -773,6 +772,7 @@
 	case KVM_INTERRUPT:
 		{
 			struct kvm_mips_interrupt irq;
+
 			r = -EFAULT;
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 				goto out;
@@ -791,9 +791,7 @@
 	return r;
 }
 
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
+/* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
@@ -815,8 +813,8 @@
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
 
-		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
-		       ga_end);
+		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+			 ga_end);
 
 		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
@@ -843,16 +841,12 @@
 
 int kvm_arch_init(void *opaque)
 {
-	int ret;
-
 	if (kvm_mips_callbacks) {
 		kvm_err("kvm: module already exists\n");
 		return -EEXIST;
 	}
 
-	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
-
-	return ret;
+	return kvm_mips_emulation_init(&kvm_mips_callbacks);
 }
 
 void kvm_arch_exit(void)
@@ -860,14 +854,14 @@
 	kvm_mips_callbacks = NULL;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
@@ -923,24 +917,25 @@
 	if (!vcpu)
 		return -1;
 
-	printk("VCPU Register Dump:\n");
-	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
-	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+	kvm_debug("VCPU Register Dump:\n");
+	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
+	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
 
 	for (i = 0; i < 32; i += 4) {
-		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
 		       vcpu->arch.gprs[i],
 		       vcpu->arch.gprs[i + 1],
 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
 	}
-	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
-	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
 
 	cop0 = vcpu->arch.cop0;
-	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
-	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+		  kvm_read_c0_guest_status(cop0),
+		  kvm_read_c0_guest_cause(cop0));
 
-	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
 
 	return 0;
 }
@@ -980,14 +975,11 @@
 	kvm_mips_callbacks->queue_timer_int(vcpu);
 
 	vcpu->arch.wait = 0;
-	if (waitqueue_active(&vcpu->wq)) {
+	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
-	}
 }
 
-/*
- * low level hrtimer wake routine.
- */
+/* low level hrtimer wake routine */
 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
@@ -1008,11 +1000,10 @@
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
-	return;
 }
 
-int
-kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
 {
 	return 0;
 }
@@ -1023,8 +1014,7 @@
 	return kvm_mips_callbacks->vcpu_setup(vcpu);
 }
 
-static
-void kvm_mips_set_c0_status(void)
+static void kvm_mips_set_c0_status(void)
 {
 	uint32_t status = read_c0_status();
 
@@ -1054,7 +1044,10 @@
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+	/*
+	 * Set the appropriate status bits based on host CPU features,
+	 * before we hit the scheduler
+	 */
 	kvm_mips_set_c0_status();
 
 	local_irq_enable();
@@ -1062,7 +1055,8 @@
 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
 			cause, opc, run, vcpu);
 
-	/* Do a privilege check, if in UM most of these exit conditions end up
+	/*
+	 * Do a privilege check; if in UM, most of these exit conditions end up
 	 * causing an exception to be delivered to the Guest Kernel
 	 */
 	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1081,9 +1075,8 @@
 		++vcpu->stat.int_exits;
 		trace_kvm_exit(vcpu, INT_EXITS);
 
-		if (need_resched()) {
+		if (need_resched())
 			cond_resched();
-		}
 
 		ret = RESUME_GUEST;
 		break;
@@ -1095,9 +1088,8 @@
 		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
 		/* XXXKYMA: Might need to return to user space */
-		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
 			ret = RESUME_HOST;
-		}
 		break;
 
 	case T_TLB_MOD:
@@ -1107,10 +1099,9 @@
 		break;
 
 	case T_TLB_ST_MISS:
-		kvm_debug
-		    ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-		     badvaddr);
+		kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
 		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
@@ -1157,10 +1148,9 @@
 		break;
 
 	default:
-		kvm_err
-		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
-		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-		     kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1175,7 +1165,7 @@
 		kvm_mips_deliver_interrupts(vcpu, cause);
 
 	if (!(ret & RESUME_HOST)) {
-		/* Only check for signals if not already exiting to userspace  */
+		/* Only check for signals if not already exiting to userspace */
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			ret = (-EINTR << 2) | RESUME_HOST;
@@ -1196,11 +1186,13 @@
 	if (ret)
 		return ret;
 
-	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
-	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
-	 * to avoid the possibility of double faulting. The issue is that the TLB code
-	 * references routines that are part of the the KVM module,
-	 * which are only available once the module is loaded.
+	/*
+	 * On MIPS, kernel modules are executed from "mapped space", which
+	 * requires TLBs. The TLB handling code is statically linked with
+	 * the rest of the kernel (tlb.c) to avoid the possibility of
+	 * double faulting. The issue is that the TLB code references
+	 * routines that are part of the KVM module, which are only
+	 * available once the module is loaded.
 	 */
 	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
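
The comment above describes a late-binding pattern: tlb.c is built into the
kernel image, so it can only reach module code through pointers that module
init fills in. Reduced to its shape (symbol names from the surrounding hunks):

	/* tlb.c, built-in: declared but left unset until the module loads */
	pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);

	/* mips.c, module init: point at the real implementations */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;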
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h
new file mode 100644
index 0000000..03a6ae8
--- /dev/null
+++ b/arch/mips/kvm/opcode.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/* Define opcode values not defined in <asm/inst.h> */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define mfmcz_op	0x0b	/* 01011 */
+#define wrpgpr_op	0x0e	/* 01110 */
+
+/* COP0 opcodes (only if COP0 and CO=1): */
+#define wait_op		0x20	/* 100000 */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
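
For reference, these values slot into the standard MIPS32 encoding: wait is a
COP0 instruction (major opcode 010000) with the CO bit set and wait_op in the
function field. A hedged decode sketch (bit positions per the MIPS32 spec;
the helper name is invented here):

	static inline int is_wait_insn(uint32_t inst)
	{
		return ((inst >> 26) == 0x10) &&	/* major opcode: COP0 */
		       (inst & (1 << 25)) &&		/* CO bit */
		       ((inst & 0x3f) == wait_op);	/* funct == 100000 */
	}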
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/stats.c
similarity index 63%
rename from arch/mips/kvm/kvm_mips_stats.c
rename to arch/mips/kvm/stats.c
index 075904b..a74d602 100644
--- a/arch/mips/kvm/kvm_mips_stats.c
+++ b/arch/mips/kvm/stats.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: COP0 access histogram
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: COP0 access histogram
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/kvm_host.h>
 
@@ -63,20 +63,18 @@
 	"DESAVE"
 };
 
-int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 	int i, j;
 
-	printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
 	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
 		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
 			if (vcpu->arch.cop0->stat[i][j])
-				printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
-				       vcpu->arch.cop0->stat[i][j]);
+				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+					 vcpu->arch.cop0->stat[i][j]);
 		}
 	}
 #endif
-
-	return 0;
 }
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/tlb.c
similarity index 78%
rename from arch/mips/kvm/kvm_tlb.c
rename to arch/mips/kvm/tlb.c
index 8a5a700..bbcd822 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -1,14 +1,14 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
-* TLB handlers run from KSEG0
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+ * TLB handlers run from KSEG0
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -18,7 +18,6 @@
 #include <linux/kvm_host.h>
 #include <linux/srcu.h>
 
-
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
 #include <asm/mmu_context.h>
@@ -39,13 +38,13 @@
 EXPORT_SYMBOL(kvm_mips_instance);
 
 /* These function pointers are initialized once the KVM module is loaded */
-pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
 EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
 
-void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
 EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
 
-bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
 EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
@@ -53,21 +52,17 @@
 	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
-
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
-inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.commpage_tlb;
 }
 
-
-/*
- * Structure defining an tlb entry data set.
- */
+/* Structure defining a TLB entry data set. */
 
 void kvm_mips_dump_host_tlbs(void)
 {
@@ -82,8 +77,8 @@
 	old_entryhi = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
 
-	printk("HOST TLBs:\n");
-	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("HOST TLBs:\n");
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -97,25 +92,26 @@
 		tlb.tlb_lo1 = read_c0_entrylo1();
 		tlb.tlb_mask = read_c0_pagemask();
 
-		printk("TLB%c%3d Hi 0x%08lx ",
-		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-		       i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+		kvm_info("TLB%c%3d Hi 0x%08lx ",
+			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+			 i, tlb.tlb_hi);
+		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo0 >> 3) & 7);
+		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 	}
 	write_c0_entryhi(old_entryhi);
 	write_c0_pagemask(old_pagemask);
 	mtc0_tlbw_hazard();
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
 
 void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 {
@@ -123,26 +119,27 @@
 	struct kvm_mips_tlb tlb;
 	int i;
 
-	printk("Guest TLBs:\n");
-	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+	kvm_info("Guest TLBs:\n");
+	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
 
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		tlb = vcpu->arch.guest_tlb[i];
-		printk("TLB%c%3d Hi 0x%08lx ",
-		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-		       i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+		kvm_info("TLB%c%3d Hi 0x%08lx ",
+			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+			 i, tlb.tlb_hi);
+		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo0 >> 3) & 7);
+		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 	}
 }
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
@@ -152,7 +149,7 @@
 	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
 		return 0;
 
-        srcu_idx = srcu_read_lock(&kvm->srcu);
+	srcu_idx = srcu_read_lock(&kvm->srcu);
 	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 
 	if (kvm_mips_is_error_pfn(pfn)) {
@@ -169,7 +166,7 @@
 
 /* Translate guest KSEG0 addresses to Host PA */
 unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-	unsigned long gva)
+						    unsigned long gva)
 {
 	gfn_t gfn;
 	uint32_t offset = gva & ~PAGE_MASK;
@@ -194,20 +191,20 @@
 
 	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
 
 /* XXXKYMA: Must be called with interrupts disabled */
 /* set flush_dcache_mask == 0 if no dcache flush required */
-int
-kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
-	unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+			    unsigned long entrylo0, unsigned long entrylo1,
+			    int flush_dcache_mask)
 {
 	unsigned long flags;
 	unsigned long old_entryhi;
-	volatile int idx;
+	int idx;
 
 	local_irq_save(flags);
 
-
 	old_entryhi = read_c0_entryhi();
 	write_c0_entryhi(entryhi);
 	mtc0_tlbw_hazard();
@@ -240,12 +237,14 @@
 	if (flush_dcache_mask) {
 		if (entrylo0 & MIPS3_PG_V) {
 			++vcpu->stat.flush_dcache_exits;
-			flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+			flush_data_cache_page((entryhi & VPN2_MASK) &
+					      ~flush_dcache_mask);
 		}
 		if (entrylo1 & MIPS3_PG_V) {
 			++vcpu->stat.flush_dcache_exits;
-			flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
-				(0x1 << PAGE_SHIFT));
+			flush_data_cache_page(((entryhi & VPN2_MASK) &
+					       ~flush_dcache_mask) |
+					      (0x1 << PAGE_SHIFT));
 		}
 	}
 
@@ -257,10 +256,9 @@
 	return 0;
 }
 
-
 /* XXXKYMA: Must be called with interrupts disabled */
 int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-	struct kvm_vcpu *vcpu)
+				    struct kvm_vcpu *vcpu)
 {
 	gfn_t gfn;
 	pfn_t pfn0, pfn1;
@@ -270,7 +268,6 @@
 	struct kvm *kvm = vcpu->kvm;
 	const int flush_dcache_mask = 0;
 
-
 	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
 		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
 		kvm_mips_dump_host_tlbs();
@@ -302,14 +299,15 @@
 	}
 
 	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
-			(0x1 << 1);
-	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
-			(0x1 << 1);
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+		   (1 << 2) | (0x1 << 1);
+	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+		   (1 << 2) | (0x1 << 1);
 
 	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 				       flush_dcache_mask);
 }
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
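
The EntryLo magic numbers above pack the standard MIPS32 TLB attribute bits
(G = bit 0, V = bit 1, D = bit 2, C = bits 3..5); decoded for orientation:

	entrylo = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT)
		  | (0x3 << 3)	/* C: cache coherency attr 3, cacheable */
		  | (1 << 2)	/* D: dirty, i.e. writable */
		  | (0x1 << 1);	/* V: valid */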
 
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	struct kvm_vcpu *vcpu)
@@ -318,11 +316,10 @@
 	unsigned long flags, old_entryhi = 0, vaddr = 0;
 	unsigned long entrylo0 = 0, entrylo1 = 0;
 
-
 	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
 	pfn1 = 0;
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
-			(0x1 << 1);
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+		   (1 << 2) | (0x1 << 1);
 	entrylo1 = 0;
 
 	local_irq_save(flags);
@@ -341,9 +338,9 @@
 	mtc0_tlbw_hazard();
 	tlbw_use_hazard();
 
-	kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-	     vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
-	     read_c0_entrylo0(), read_c0_entrylo1());
+	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+		  read_c0_entrylo0(), read_c0_entrylo1());
 
 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
@@ -353,28 +350,33 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
 
-int
-kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-	struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+					 struct kvm_mips_tlb *tlb,
+					 unsigned long *hpa0,
+					 unsigned long *hpa1)
 {
 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	struct kvm *kvm = vcpu->kvm;
 	pfn_t pfn0, pfn1;
 
-
 	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
 		pfn0 = 0;
 		pfn1 = 0;
 	} else {
-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+					   >> PAGE_SHIFT) < 0)
 			return -1;
 
-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+					   >> PAGE_SHIFT) < 0)
 			return -1;
 
-		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
-		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+					    >> PAGE_SHIFT];
+		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+					    >> PAGE_SHIFT];
 	}
 
 	if (hpa0)
@@ -385,11 +387,12 @@
 
 	/* Get attributes from the Guest TLB */
 	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-			kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+					       kvm_mips_get_kernel_asid(vcpu) :
+					       kvm_mips_get_user_asid(vcpu));
 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-			(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-			(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
 
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
@@ -397,6 +400,7 @@
 	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 				       tlb->tlb_mask);
 }
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
 
 int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 {
@@ -404,10 +408,9 @@
 	int index = -1;
 	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
 
-
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-			(TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
+		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
+		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
 			index = i;
 			break;
 		}
@@ -418,21 +421,23 @@
 
 	return index;
 }
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
 
 int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 {
 	unsigned long old_entryhi, flags;
-	volatile int idx;
-
+	int idx;
 
 	local_irq_save(flags);
 
 	old_entryhi = read_c0_entryhi();
 
 	if (KVM_GUEST_KERNEL_MODE(vcpu))
-		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+		write_c0_entryhi((vaddr & VPN2_MASK) |
+				 kvm_mips_get_kernel_asid(vcpu));
 	else {
-		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+		write_c0_entryhi((vaddr & VPN2_MASK) |
+				 kvm_mips_get_user_asid(vcpu));
 	}
 
 	mtc0_tlbw_hazard();
@@ -452,6 +457,7 @@
 
 	return idx;
 }
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
 
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 {
@@ -460,7 +466,6 @@
 
 	local_irq_save(flags);
 
-
 	old_entryhi = read_c0_entryhi();
 
 	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
@@ -499,8 +504,9 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
 
-/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/
+/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
 int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
 {
 	unsigned long flags, old_entryhi;
@@ -510,7 +516,6 @@
 
 	local_irq_save(flags);
 
-
 	old_entryhi = read_c0_entryhi();
 
 	write_c0_entryhi(UNIQUE_ENTRYHI(index));
@@ -546,7 +551,6 @@
 	int entry = 0;
 	int maxentry = current_cpu_data.tlbsize;
 
-
 	local_irq_save(flags);
 
 	old_entryhi = read_c0_entryhi();
@@ -554,7 +558,6 @@
 
 	/* Blast 'em all away. */
 	for (entry = 0; entry < maxentry; entry++) {
-
 		write_c0_index(entry);
 		mtc0_tlbw_hazard();
 
@@ -565,9 +568,8 @@
 			entryhi = read_c0_entryhi();
 
 			/* Don't blow away guest kernel entries */
-			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
 				continue;
-			}
 		}
 
 		/* Make sure all entries differ. */
@@ -591,17 +593,17 @@
 
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
 
-void
-kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-			struct kvm_vcpu *vcpu)
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+			     struct kvm_vcpu *vcpu)
 {
 	unsigned long asid = asid_cache(cpu);
 
-	if (!((asid += ASID_INC) & ASID_MASK)) {
-		if (cpu_has_vtag_icache) {
+	asid += ASID_INC;
+	if (!(asid & ASID_MASK)) {
+		if (cpu_has_vtag_icache)
 			flush_icache_all();
-		}
 
 		kvm_local_flush_tlb_all();      /* start new asid cycle */
 
@@ -639,6 +641,7 @@
 
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
 
 /**
  * kvm_mips_migrate_count() - Migrate timer.
@@ -699,7 +702,10 @@
 	}
 
 	if (!newasid) {
-		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
+		/*
+		 * If we preempted while the guest was executing, then reload
+		 * the pre-empted ASID
+		 */
 		if (current->flags & PF_VCPU) {
 			write_c0_entryhi(vcpu->arch.
 					 preempt_entryhi & ASID_MASK);
@@ -708,9 +714,10 @@
 	} else {
 		/* New ASIDs were allocated for the VM */
 
-		/* Were we in guest context? If so then the pre-empted ASID is no longer
-		 * valid, we need to set it to what it should be based on the mode of
-		 * the Guest (Kernel/User)
+		/*
+		 * Were we in guest context? If so, the pre-empted ASID is
+		 * no longer valid; we need to set it to what it should be based
+		 * on the mode of the Guest (Kernel/User)
 		 */
 		if (current->flags & PF_VCPU) {
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
@@ -728,6 +735,7 @@
 	local_irq_restore(flags);
 
 }
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
 
 /* ASID can change if another task is scheduled during preemption */
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -739,7 +747,6 @@
 
 	cpu = smp_processor_id();
 
-
 	vcpu->arch.preempt_entryhi = read_c0_entryhi();
 	vcpu->arch.last_sched_cpu = cpu;
 
@@ -754,11 +761,12 @@
 
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
 
 uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	unsigned long paddr, flags;
+	unsigned long paddr, flags, vpn2, asid;
 	uint32_t inst;
 	int index;
 
@@ -769,16 +777,12 @@
 		if (index >= 0) {
 			inst = *(opc);
 		} else {
-			index =
-			    kvm_mips_guest_tlb_lookup(vcpu,
-						      ((unsigned long) opc & VPN2_MASK)
-						      |
-						      (kvm_read_c0_guest_entryhi
-						       (cop0) & ASID_MASK));
+			vpn2 = (unsigned long) opc & VPN2_MASK;
+			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
 			if (index < 0) {
-				kvm_err
-				    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
-				     __func__, opc, vcpu, read_c0_entryhi());
+				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+					__func__, opc, vcpu, read_c0_entryhi());
 				kvm_mips_dump_host_tlbs();
 				local_irq_restore(flags);
 				return KVM_INVALID_INST;
@@ -793,7 +797,7 @@
 	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
 		paddr =
 		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
-							 (unsigned long) opc);
+							  (unsigned long) opc);
 		inst = *(uint32_t *) CKSEG0ADDR(paddr);
 	} else {
 		kvm_err("%s: illegal address: %p\n", __func__, opc);
@@ -802,18 +806,4 @@
 
 	return inst;
 }
-
-EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
-EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
-EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
-EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
-EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
-EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 EXPORT_SYMBOL(kvm_get_inst);
-EXPORT_SYMBOL(kvm_arch_vcpu_load);
-EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index bc9e0f4..c1388d4 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -1,11 +1,11 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KVM_H
@@ -17,9 +17,7 @@
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE trace
 
-/*
- * Tracepoints for VM eists
- */
+/* Tracepoints for VM exits */
 extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
 
 TRACE_EVENT(kvm_exit,
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/trap_emul.c
similarity index 83%
rename from arch/mips/kvm/kvm_trap_emul.c
rename to arch/mips/kvm/trap_emul.c
index 693f952..fd7257b 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -16,8 +16,8 @@
 
 #include <linux/kvm_host.h>
 
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
+#include "opcode.h"
+#include "interrupt.h"
 
 static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 {
@@ -27,7 +27,7 @@
 	if ((kseg == CKSEG0) || (kseg == CKSEG1))
 		gpa = CPHYSADDR(gva);
 	else {
-		printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
 		kvm_mips_dump_host_tlbs();
 		gpa = KVM_INVALID_ADDR;
 	}
@@ -37,7 +37,6 @@
 	return gpa;
 }
 
-
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
@@ -46,9 +45,9 @@
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
 		er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
-	} else
+	else
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 
 	switch (er) {
@@ -83,9 +82,8 @@
 
 	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-		kvm_debug
-		    ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, opc, badvaddr);
 		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
 
 		if (er == EMULATE_DONE)
@@ -95,20 +93,20 @@
 			ret = RESUME_HOST;
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-		/* XXXKYMA: The guest kernel does not expect to get this fault when we are not
-		 * using HIGHMEM. Need to address this in a HIGHMEM kernel
+		/*
+		 * XXXKYMA: The guest kernel does not expect to get this fault
+		 * when we are not using HIGHMEM. Need to address this in a
+		 * HIGHMEM kernel
 		 */
-		printk
-		    ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	} else {
-		printk
-		    ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -134,9 +132,8 @@
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-		kvm_debug
-		    ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, opc, badvaddr);
 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
@@ -145,8 +142,9 @@
 			ret = RESUME_HOST;
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-		/* All KSEG0 faults are handled by KVM, as the guest kernel does not
-		 * expect to ever get them
+		/*
+		 * All KSEG0 faults are handled by KVM, as the guest kernel does
+		 * not expect to ever get them
 		 */
 		if (kvm_mips_handle_kseg0_tlb_fault
 		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
@@ -154,9 +152,8 @@
 			ret = RESUME_HOST;
 		}
 	} else {
-		kvm_err
-		    ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -185,11 +182,14 @@
 		kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
 			  vcpu->arch.pc, badvaddr);
 
-		/* User Address (UA) fault, this could happen if
-		 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
-		 *     case we pass on the fault to the guest kernel and let it handle it.
-		 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
-		 *     case we inject the TLB from the Guest TLB into the shadow host TLB
+		/*
+		 * User Address (UA) fault, this could happen if
+		 * (1) TLB entry not present/valid in both Guest and shadow host
+		 *     TLBs, in this case we pass on the fault to the guest
+		 *     kernel and let it handle it.
+		 * (2) TLB entry is present in the Guest TLB but not in the
+		 *     shadow, in this case we inject the TLB from the Guest TLB
+		 *     into the shadow host TLB
 		 */
 
 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
@@ -206,9 +206,8 @@
 			ret = RESUME_HOST;
 		}
 	} else {
-		printk
-		    ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -231,7 +230,7 @@
 		kvm_debug("Emulate Store to MMIO space\n");
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
-			printk("Emulate Store to MMIO space failed\n");
+			kvm_err("Emulate Store to MMIO space failed\n");
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		} else {
@@ -239,9 +238,8 @@
 			ret = RESUME_HOST;
 		}
 	} else {
-		printk
-		    ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
@@ -261,7 +259,7 @@
 		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
-			printk("Emulate Load from MMIO space failed\n");
+			kvm_err("Emulate Load from MMIO space failed\n");
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		} else {
@@ -269,9 +267,8 @@
 			ret = RESUME_HOST;
 		}
 	} else {
-		printk
-		    ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		er = EMULATE_FAIL;
@@ -349,9 +346,9 @@
 	uint32_t config1;
 	int vcpu_id = vcpu->vcpu_id;
 
-	/* Arch specific stuff, set up config registers properly so that the
-	 * guest will come up as expected, for now we simulate a
-	 * MIPS 24kc
+	/*
+	 * Arch specific stuff, set up config registers properly so that the
+	 * guest will come up as expected, for now we simulate a MIPS 24kc
 	 */
 	kvm_write_c0_guest_prid(cop0, 0x00019300);
 	kvm_write_c0_guest_config(cop0,
@@ -373,14 +370,15 @@
 
 	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
 	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
-	kvm_write_c0_guest_config3(cop0,
-				   MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
-								       CP0C3_ULRI));
+	kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
+					 (1 << CP0C3_ULRI));
 
 	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
 	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
 
-	/* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) */
+	/*
+	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
+	 */
 	kvm_write_c0_guest_intctl(cop0, 0xFC000000);
 
 	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index 8b80b19..769d5ed 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -68,7 +68,9 @@
 extern void identify_cpu(struct mn10300_cpuinfo *);
 extern void print_cpu_info(struct mn10300_cpuinfo *);
 extern void dodgy_tsc(void);
+
 #define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /*
  * User space process size: 1.75GB (default).
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index cab746f..4d235e3 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -101,6 +101,7 @@
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_OPENRISC_PROCESSOR_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 108d48e..6e75e20 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -6,7 +6,6 @@
 	select HAVE_OPROFILE
 	select HAVE_FUNCTION_TRACER if 64BIT
 	select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT
 	select ARCH_WANT_FRAME_POINTERS
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index d951c96..689a8ad 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -338,6 +338,7 @@
 #define KSTK_ESP(tsk)	((tsk)->thread.regs.gr[30])
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Used as a macro to identify the combined VIPT/PIPT cached
  * CPUs which require a guarantee of coherency (no inequivalent
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 5beb97b..559d400 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -112,6 +112,9 @@
 	unsigned long long calltime;
 	struct ftrace_graph_ent trace;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
@@ -152,9 +155,6 @@
 {
 	extern ftrace_func_t ftrace_trace_function;
 
-	if (function_trace_stop)
-		return;
-
 	if (ftrace_trace_function != ftrace_stub) {
 		ftrace_trace_function(parent, self_addr);
 		return;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
index f75b4f820..7d4a6a2 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
@@ -32,7 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-	compatible = "fsl,sec-v6.0";
+	compatible = "fsl,sec-v6.0", "fsl,sec-v5.0",
+		     "fsl,sec-v4.0";
 	fsl,sec-era = <6>;
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 6d59072..dda7ac4 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -400,6 +400,8 @@
 #define cpu_relax()	barrier()
 #endif
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Check that a certain kernel stack pointer is valid in task_struct p */
 int validate_sp(unsigned long sp, struct task_struct *p,
                        unsigned long nbytes);
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index d178834..390311c 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -525,6 +525,9 @@
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index b49c72f..b2814e2 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -123,21 +123,12 @@
 
 void pcibios_reset_secondary_bus(struct pci_dev *dev)
 {
-	u16 ctrl;
-
 	if (ppc_md.pcibios_reset_secondary_bus) {
 		ppc_md.pcibios_reset_secondary_bus(dev);
 		return;
 	}
 
-	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
-	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
-	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
-	msleep(2);
-
-	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
-	ssleep(1);
+	pci_reset_secondary_bus(dev);
 }
 
 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d..db2b482 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@
 	for (f = flist; f; f = next) {
 		/* Translate data addrs to absolute */
 		for (i = 0; i < f->num_blocks; i++) {
-			f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+			f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
 			image_size += f->blocks[i].length;
+			f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
 		}
 		next = f->next;
 		/* Don't translate NULL pointer for last entry */
 		if (f->next)
-			f->next = (struct flash_block_list *)__pa(f->next);
+			f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
 		else
 			f->next = NULL;
 		/* make num_blocks into the version/length field */
 		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+		f->num_blocks = cpu_to_be64(f->num_blocks);
 	}
 
 	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6b0641c..fe52db2 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1307,6 +1307,9 @@
  out_enable:
 	pmao_restore_workaround(ebb);
 
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, 0);
+
 	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
 	mb();
@@ -1315,9 +1318,6 @@
 
 	write_mmcr0(cpuhw, mmcr0);
 
-	if (ppmu->flags & PPMU_ARCH_207S)
-		mtspr(SPRN_MMCR2, 0);
-
 	/*
 	 * Enable instruction sampling if necessary
 	 */
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index e0766b8..66d0f17 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -387,8 +387,7 @@
 	    event->attr.exclude_hv     ||
 	    event->attr.exclude_idle   ||
 	    event->attr.exclude_host   ||
-	    event->attr.exclude_guest  ||
-	    is_sampling_event(event)) /* no sampling */
+	    event->attr.exclude_guest)
 		return -EINVAL;
 
 	/* no branch sampling */
@@ -513,6 +512,9 @@
 	if (!hv_page_cache)
 		return -ENOMEM;
 
+	/* sampling not supported */
+	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
 	if (r)
 		return r;
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index c9d399a..15fc76c 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -210,8 +210,7 @@
 	    event->attr.exclude_hv     ||
 	    event->attr.exclude_idle   ||
 	    event->attr.exclude_host   ||
-	    event->attr.exclude_guest  ||
-	    is_sampling_event(event)) /* no sampling */
+	    event->attr.exclude_guest)
 		return -EINVAL;
 
 	/* no branch sampling */
@@ -284,6 +283,9 @@
 		return -ENODEV;
 	}
 
+	/* sampling not supported */
+	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
 	if (r)
 		return r;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c4..0ad533b 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@
 
 	rc = opal_get_elog_size(&id, &size, &type);
 	if (rc != OPAL_SUCCESS) {
-		pr_err("ELOG: Opal log read failed\n");
+		pr_err("ELOG: OPAL log info read failed\n");
 		return;
 	}
 
@@ -257,7 +257,7 @@
 	log_id = be64_to_cpu(id);
 	elog_type = be64_to_cpu(type);
 
-	BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+	WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
 	if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
 		elog_size  =  OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bb63499..f5af5f6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,7 +116,6 @@
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 4181d7b..773bef7 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -305,7 +305,6 @@
 	struct list_head list;
 	atomic_t active;
 	struct kvm_s390_float_interrupt *float_int;
-	int timer_due; /* event indicator for waitqueue below */
 	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
@@ -367,7 +366,6 @@
 	s390_fp_regs      guest_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
-	struct tasklet_struct tasklet;
 	struct kvm_s390_pgm_info pgm;
 	union  {
 		struct cpuid	cpu_id;
@@ -418,6 +416,7 @@
 	int css_support;
 	int use_irqchip;
 	int use_cmma;
+	int user_cpu_state_ctrl;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
 	spinlock_t start_stop_lock;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6f02d45..e568fc8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -217,7 +217,7 @@
 	barrier();
 }
 
-#define arch_mutex_cpu_relax()  barrier()
+#define cpu_relax_lowlatency()  barrier()
 
 static inline void psw_set_key(unsigned int key)
 {
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 7366373..08fe6da 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -16,6 +16,7 @@
 header-y += ipcbuf.h
 header-y += kvm.h
 header-y += kvm_para.h
+header-y += kvm_perf.h
 header-y += kvm_virtio.h
 header-y += mman.h
 header-y += monwriter.h
diff --git a/arch/s390/include/uapi/asm/kvm_perf.h b/arch/s390/include/uapi/asm/kvm_perf.h
new file mode 100644
index 0000000..3972827
--- /dev/null
+++ b/arch/s390/include/uapi/asm/kvm_perf.h
@@ -0,0 +1,25 @@
+/*
+ * Definitions for perf-kvm on s390
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_KVM_PERF_S390_H
+#define __LINUX_KVM_PERF_S390_H
+
+#include <asm/sie.h>
+
+#define DECODE_STR_LEN 40
+
+#define VCPU_ID "id"
+
+#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter"
+#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit"
+#define KVM_EXIT_REASON "icptcode"
+
+#endif
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 5d9cc19..d4096fd 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -108,6 +108,7 @@
 	exit_code_ipa0(0xB2, 0x17, "STETR"),	\
 	exit_code_ipa0(0xB2, 0x18, "PC"),	\
 	exit_code_ipa0(0xB2, 0x20, "SERVC"),	\
+	exit_code_ipa0(0xB2, 0x21, "IPTE"),	\
 	exit_code_ipa0(0xB2, 0x28, "PT"),	\
 	exit_code_ipa0(0xB2, 0x29, "ISKE"),	\
 	exit_code_ipa0(0xB2, 0x2a, "RRBE"),	\
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 08dcf21..433c6db 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -21,13 +21,9 @@
 ENTRY(ftrace_caller)
 #endif
 	stm	%r2,%r5,16(%r15)
-	bras	%r1,2f
+	bras	%r1,1f
 0:	.long	ftrace_trace_function
-1:	.long	function_trace_stop
-2:	l	%r2,1b-0b(%r1)
-	icm	%r2,0xf,0(%r2)
-	jnz	3f
-	st	%r14,56(%r15)
+1:	st	%r14,56(%r15)
 	lr	%r0,%r15
 	ahi	%r15,-96
 	l	%r3,100(%r15)
@@ -50,7 +46,7 @@
 #endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
-3:	lm	%r2,%r5,16(%r15)
+	lm	%r2,%r5,16(%r15)
 	br	%r14
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index 1c52eae..c67a8bf 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -20,9 +20,6 @@
 
 ENTRY(ftrace_caller)
 #endif
-	larl	%r1,function_trace_stop
-	icm	%r1,0xf,0(%r1)
-	bnzr	%r14
 	stmg	%r2,%r5,32(%r15)
 	stg	%r14,112(%r15)
 	lgr	%r1,%r15
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index ea75d01..d3194de 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -411,12 +411,6 @@
 	case PERF_TYPE_HARDWARE:
 	case PERF_TYPE_HW_CACHE:
 	case PERF_TYPE_RAW:
-		/* The CPU measurement counter facility does not have overflow
-		 * interrupts to do sampling.  Sampling must be provided by
-		 * external means, for example, by timers.
-		 */
-		if (is_sampling_event(event))
-			return -ENOENT;
 		err = __hw_perf_event_init(event);
 		break;
 	default:
@@ -681,6 +675,12 @@
 		goto out;
 	}
 
+	/* The CPU measurement counter facility does not have overflow
+	 * interrupts to do sampling.  Sampling must be provided by
+	 * external means, for example, by timers.
+	 */
+	cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	cpumf_pmu.attr_groups = cpumf_cf_event_group();
 	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
 	if (rc) {
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 0161675..59bd8f9 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -176,7 +176,8 @@
 		return -EOPNOTSUPP;
 	}
 
-	kvm_s390_vcpu_stop(vcpu);
+	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+		kvm_s390_vcpu_stop(vcpu);
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index a0b586c..eaf4629 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -56,32 +56,26 @@
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
+	unsigned int action_bits;
 
 	vcpu->stat.exit_stop_request++;
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-
 	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
-		kvm_s390_vcpu_stop(vcpu);
-		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
-		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
-		rc = -EOPNOTSUPP;
-	}
+	action_bits = vcpu->arch.local_int.action_bits;
 
-	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
-		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-		/* store status must be called unlocked. Since local_int.lock
-		 * only protects local_int.* and not guest memory we can give
-		 * up the lock here */
-		spin_unlock_bh(&vcpu->arch.local_int.lock);
+	if (!(action_bits & ACTION_STOP_ON_STOP))
+		return 0;
+
+	if (action_bits & ACTION_STORE_ON_STOP) {
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
-		if (rc >= 0)
-			rc = -EOPNOTSUPP;
-	} else
-		spin_unlock_bh(&vcpu->arch.local_int.lock);
-	return rc;
+		if (rc)
+			return rc;
+	}
+
+	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+		kvm_s390_vcpu_stop(vcpu);
+	return -EOPNOTSUPP;
 }
 
 static int handle_validity(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 90c8de2..92528a0 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -158,6 +158,9 @@
 					       LCTL_CR10 | LCTL_CR11);
 		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
 	}
+
+	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
+		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -544,13 +547,13 @@
 	int rc = 0;
 
 	if (atomic_read(&li->active)) {
-		spin_lock_bh(&li->lock);
+		spin_lock(&li->lock);
 		list_for_each_entry(inti, &li->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&li->lock);
+		spin_unlock(&li->lock);
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
@@ -585,88 +588,56 @@
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
 	u64 now, sltime;
-	DECLARE_WAITQUEUE(wait, current);
 
 	vcpu->stat.exit_wait_state++;
-	if (kvm_cpu_has_interrupt(vcpu))
-		return 0;
 
-	__set_cpu_idle(vcpu);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 0;
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	/* fast path */
+	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+		return 0;
 
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
-		__unset_cpu_idle(vcpu);
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
+	__set_cpu_idle(vcpu);
 	if (!ckc_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
 		goto no_timer;
 	}
 
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	if (vcpu->arch.sie_block->ckc < now) {
-		__unset_cpu_idle(vcpu);
-		return 0;
-	}
-
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-	spin_lock(&vcpu->arch.local_int.float_int->lock);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->wq, &wait);
-	while (list_empty(&vcpu->arch.local_int.list) &&
-		list_empty(&vcpu->arch.local_int.float_int->list) &&
-		(!vcpu->arch.local_int.timer_due) &&
-		!signal_pending(current) &&
-		!kvm_s390_si_ext_call_pending(vcpu)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock(&vcpu->arch.local_int.float_int->lock);
-		schedule();
-		spin_lock(&vcpu->arch.local_int.float_int->lock);
-		spin_lock_bh(&vcpu->arch.local_int.lock);
-	}
+	kvm_vcpu_block(vcpu);
 	__unset_cpu_idle(vcpu);
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
 
-void kvm_s390_tasklet(unsigned long parm)
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
-	spin_lock(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->wq))
+	if (waitqueue_active(&vcpu->wq)) {
+		/*
+		 * The vcpu gave up the cpu voluntarily; mark it as a good
+		 * yield-candidate.
+		 */
+		vcpu->preempted = true;
 		wake_up_interruptible(&vcpu->wq);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	}
 }
 
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	vcpu->preempted = true;
-	tasklet_schedule(&vcpu->arch.tasklet);
+	kvm_s390_vcpu_wakeup(vcpu);
 
 	return HRTIMER_NORESTART;
 }
@@ -676,13 +647,13 @@
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info  *n, *inti = NULL;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_for_each_entry_safe(inti, n, &li->list, list) {
 		list_del(&inti->list);
 		kfree(inti);
 	}
 	atomic_set(&li->active, 0);
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
 	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +672,7 @@
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -712,7 +683,7 @@
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -758,7 +729,7 @@
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if ((inti->type == KVM_S390_MCHK) &&
 				    __interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +741,7 @@
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -817,11 +788,11 @@
 
 	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -842,11 +813,11 @@
 
 	inti->type = KVM_S390_PROGRAM_INT;
 	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -934,12 +905,10 @@
 	}
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -1081,7 +1050,7 @@
 
 	mutex_lock(&vcpu->kvm->lock);
 	li = &vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (inti->type == KVM_S390_PROGRAM_INT)
 		list_add(&inti->list, &li->list);
 	else
@@ -1090,11 +1059,9 @@
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-	vcpu->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_vcpu_wakeup(vcpu);
 	return 0;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2f3e14f..339b34a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -166,7 +166,9 @@
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_ENABLE_CAP_VM:
+	case KVM_CAP_S390_IRQCHIP:
 	case KVM_CAP_VM_ATTRIBUTES:
+	case KVM_CAP_MP_STATE:
 		r = 1;
 		break;
 	case KVM_CAP_NR_VCPUS:
@@ -595,7 +597,8 @@
 	vcpu->arch.sie_block->pp = 0;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
-	kvm_s390_vcpu_stop(vcpu);
+	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+		kvm_s390_vcpu_stop(vcpu);
 	kvm_s390_clear_local_irqs(vcpu);
 }
 
@@ -647,8 +650,6 @@
 			return rc;
 	}
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
-		     (unsigned long) vcpu);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
@@ -926,7 +927,7 @@
 {
 	int rc = 0;
 
-	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
+	if (!is_vcpu_stopped(vcpu))
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
@@ -980,13 +981,34 @@
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	return -EINVAL; /* not implemented yet */
+	/* CHECK_STOP and LOAD are not supported yet */
+	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
+				       KVM_MP_STATE_OPERATING;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	return -EINVAL; /* not implemented yet */
+	int rc = 0;
+
+	/* user space knows about this interface - let it control the state */
+	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
+
+	switch (mp_state->mp_state) {
+	case KVM_MP_STATE_STOPPED:
+		kvm_s390_vcpu_stop(vcpu);
+		break;
+	case KVM_MP_STATE_OPERATING:
+		kvm_s390_vcpu_start(vcpu);
+		break;
+	case KVM_MP_STATE_LOAD:
+	case KVM_MP_STATE_CHECK_STOP:
+		/* fall through - CHECK_STOP and LOAD are not supported yet */
+	default:
+		rc = -ENXIO;
+	}
+
+	return rc;
 }
 
 bool kvm_s390_cmma_enabled(struct kvm *kvm)
@@ -1045,6 +1067,9 @@
 		goto retry;
 	}
 
+	/* nothing to do, just clear the request */
+	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+
 	return 0;
 }
 
@@ -1284,7 +1309,13 @@
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	kvm_s390_vcpu_start(vcpu);
+	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
+		kvm_s390_vcpu_start(vcpu);
+	} else if (is_vcpu_stopped(vcpu)) {
+		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
+				   vcpu->vcpu_id);
+		return -EINVAL;
+	}
 
 	switch (kvm_run->exit_reason) {
 	case KVM_EXIT_S390_SIEIC:
@@ -1413,11 +1444,6 @@
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
-static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
-{
-	return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
-}
-
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -1451,7 +1477,7 @@
 
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
 	/* Only one cpu at a time may enter/leave the STOPPED state. */
-	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
 	for (i = 0; i < online_vcpus; i++) {
@@ -1477,7 +1503,7 @@
 	 * Let's play safe and flush the VCPU at startup.
 	 */
 	vcpu->arch.sie_block->ihcpu  = 0xffff;
-	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
 	return;
 }
 
@@ -1491,10 +1517,18 @@
 
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
 	/* Only one cpu at a time may enter/leave the STOPPED state. */
-	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
+	/* Need to lock access to action_bits to avoid a SIGP race condition */
+	spin_lock(&vcpu->arch.local_int.lock);
 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+
+	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
+	vcpu->arch.local_int.action_bits &=
+				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
+	spin_unlock(&vcpu->arch.local_int.lock);
+
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
@@ -1512,7 +1546,7 @@
 		__enable_ibs_on_vcpu(started_vcpu);
 	}
 
-	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
 	return;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a8655ed..3862fa2 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -45,9 +45,9 @@
 	  d_args); \
 } while (0)
 
-static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
+static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 {
-	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
+	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -129,9 +129,15 @@
 	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
 }
 
+/* are cpu states controlled by user space? */
+static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
+{
+	return kvm->arch.user_cpu_state_ctrl != 0;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 43079a4..cf243ba 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -125,8 +125,9 @@
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 {
+	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info *inti;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
@@ -135,7 +136,13 @@
 		return -ENOMEM;
 	inti->type = KVM_S390_SIGP_STOP;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
+	if (li->action_bits & ACTION_STOP_ON_STOP) {
+		/* another SIGP STOP is pending */
+		kfree(inti);
+		rc = SIGP_CC_BUSY;
+		goto out;
+	}
 	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		kfree(inti);
 		if ((action & ACTION_STORE_ON_STOP) != 0)
@@ -144,19 +151,17 @@
 	}
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
 	li->action_bits |= action;
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
 out:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	return rc;
 }
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-	struct kvm_s390_local_interrupt *li;
 	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
@@ -166,9 +171,8 @@
 	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
-	li = &dst_vcpu->arch.local_int;
 
-	rc = __inject_sigp_stop(li, action);
+	rc = __inject_sigp_stop(dst_vcpu, action);
 
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
@@ -238,7 +242,7 @@
 	if (!inti)
 		return SIGP_CC_BUSY;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	/* cpu must be in stopped state */
 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
@@ -253,13 +257,12 @@
 
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return rc;
 }
 
@@ -275,9 +278,9 @@
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_unlock(&dst_vcpu->arch.local_int.lock);
 	if (!(flags & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -338,10 +341,10 @@
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	return rc;
 }
@@ -461,12 +464,7 @@
 		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 		BUG_ON(dest_vcpu == NULL);
 
-		spin_lock_bh(&dest_vcpu->arch.local_int.lock);
-		if (waitqueue_active(&dest_vcpu->wq))
-			wake_up_interruptible(&dest_vcpu->wq);
-		dest_vcpu->preempted = true;
-		spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
-
+		kvm_s390_vcpu_wakeup(dest_vcpu);
 		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
 		return 0;
 	}
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index d9a922d..851f441 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -24,6 +24,7 @@
 #define current_text_addr() ({ __label__ _l; _l: &&_l; })
 
 #define cpu_relax()		barrier()
+#define cpu_relax_lowlatency()        cpu_relax()
 #define release_thread(thread)	do {} while (0)
 
 /*
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 834b67c..aa2df3e 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -57,7 +57,6 @@
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 5448f9b..1506897 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -97,6 +97,7 @@
 
 #define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 void default_idle(void);
 void stop_this_cpu(void *);
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 3c74f53..079d70e 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -344,6 +344,9 @@
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 0233167..7cfd7f1 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -129,14 +129,6 @@
 		return -ENODEV;
 
 	/*
-	 * All of the on-chip counters are "limited", in that they have
-	 * no interrupts, and are therefore unable to do sampling without
-	 * further work and timer assistance.
-	 */
-	if (hwc->sample_period)
-		return -EINVAL;
-
-	/*
 	 * See if we need to reserve the counter.
 	 *
 	 * If no events are currently in use, then we have to take a
@@ -392,6 +384,13 @@
 
 	pr_info("Performance Events: %s support registered\n", _pmu->name);
 
+	/*
+	 * All of the on-chip counters are "limited", in that they have
+	 * no interrupts, and are therefore unable to do sampling without
+	 * further work and timer assistance.
+	 */
+	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 52aa201..7a8572f 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -92,13 +92,6 @@
 	rts
 	 nop
 #else
-#ifndef CONFIG_DYNAMIC_FTRACE
-	mov.l	.Lfunction_trace_stop, r0
-	mov.l	@r0, r0
-	tst	r0, r0
-	bf	ftrace_stub
-#endif
-
 	MCOUNT_ENTER()
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -174,11 +167,6 @@
 
 	.globl ftrace_caller
 ftrace_caller:
-	mov.l	.Lfunction_trace_stop, r0
-	mov.l	@r0, r0
-	tst	r0, r0
-	bf	ftrace_stub
-
 	MCOUNT_ENTER()
 
 	.globl ftrace_call
@@ -196,8 +184,6 @@
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 	.align 2
-.Lfunction_trace_stop:
-	.long	function_trace_stop
 
 /*
  * NOTE: From here on the locations of the .Lftrace_stub label and
@@ -217,12 +203,7 @@
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	mov.l	2f, r0
-	mov.l	@r0, r0
-	tst	r0, r0
-	bt	1f
-
-	mov.l	3f, r1
+	mov.l	2f, r1
 	jmp	@r1
 	 nop
 1:
@@ -242,8 +223,7 @@
 	MCOUNT_LEAVE()
 
 	.align 2
-2:	.long	function_trace_stop
-3:	.long	skip_trace
+2:	.long	skip_trace
 .Lprepare_ftrace_return:
 	.long	prepare_ftrace_return
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 407c87d..4692c90 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -55,7 +55,6 @@
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index a564817..812fd08 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -119,6 +119,8 @@
 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+
 extern void (*sparc_idle)(void);
 
 #endif
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 7028fe1..6924bde 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -216,6 +216,7 @@
 				     "nop\n\t"				\
 				     ".previous"			\
 				     ::: "memory")
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 3ad6cbd..0b0ed4d 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -24,10 +24,7 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/* Do nothing, the retl/nop below is all we need.  */
 #else
-	sethi		%hi(function_trace_stop), %g1
-	lduw		[%g1 + %lo(function_trace_stop)], %g2
-	brnz,pn		%g2, 2f
-	 sethi		%hi(ftrace_trace_function), %g1
+	sethi		%hi(ftrace_trace_function), %g1
 	sethi		%hi(ftrace_stub), %g2
 	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
 	or		%g2, %lo(ftrace_stub), %g2
@@ -80,11 +77,8 @@
 	.globl		ftrace_caller
 	.type		ftrace_caller,#function
 ftrace_caller:
-	sethi		%hi(function_trace_stop), %g1
 	mov		%i7, %g2
-	lduw		[%g1 + %lo(function_trace_stop)], %g1
-	brnz,pn		%g1, ftrace_stub
-	 mov		%fp, %g3
+	mov		%fp, %g3
 	save		%sp, -176, %sp
 	mov		%g2, %o1
 	mov		%g2, %l0
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4f3006b..7fcd492 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -128,7 +128,6 @@
 	select SPARSE_IRQ
 	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 4232363..dd4f9f1 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -266,6 +266,8 @@
 	barrier();
 }
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Info on this processor (see fs/proc/cpuinfo.c) */
 struct seq_operations;
 extern const struct seq_operations cpuinfo_op;
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
index 70d7bb0..3c2b8d5 100644
--- a/arch/tile/kernel/mcount_64.S
+++ b/arch/tile/kernel/mcount_64.S
@@ -77,15 +77,6 @@
 
 	.align	64
 STD_ENTRY(ftrace_caller)
-	moveli	r11, hw2_last(function_trace_stop)
-	{ shl16insli	r11, r11, hw1(function_trace_stop); move r12, lr }
-	{ shl16insli	r11, r11, hw0(function_trace_stop); move lr, r10 }
-	ld	r11, r11
-	beqz	r11, 1f
-	jrp	r12
-
-1:
-	{ move	r10, lr; move	lr, r12 }
 	MCOUNT_SAVE_REGS
 
 	/* arg1: self return address */
@@ -119,15 +110,6 @@
 
 	.align	64
 STD_ENTRY(__mcount)
-	moveli	r11, hw2_last(function_trace_stop)
-	{ shl16insli	r11, r11, hw1(function_trace_stop); move r12, lr }
-	{ shl16insli	r11, r11, hw0(function_trace_stop); move lr, r10 }
-	ld	r11, r11
-	beqz	r11, 1f
-	jrp	r12
-
-1:
-	{ move	r10, lr; move	lr, r12 }
 	{
 	 moveli	r11, hw2_last(ftrace_trace_function)
 	 moveli	r13, hw2_last(ftrace_stub)
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index 4eaa421..8d21b7a 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -71,6 +71,7 @@
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()			barrier()
+#define cpu_relax_lowlatency()                cpu_relax()
 
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d24887b..2840c27 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,7 +54,6 @@
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_SYSCALL_TRACEPOINTS
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_KVM
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 61d6e28..d551165 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
 
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
 obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
@@ -52,6 +53,7 @@
 serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
 
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
+des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
 camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
 blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
@@ -76,7 +78,7 @@
 endif
 
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
-aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
new file mode 100644
index 0000000..f091f12
--- /dev/null
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -0,0 +1,546 @@
+/*
+ *	Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
+ *
+ * This is AES128/192/256 CTR mode optimization implementation. It requires
+ * the support of Intel(R) AESNI and AVX instructions.
+ *
+ * This work was inspired by the AES CTR mode optimization published
+ * in Intel Optimized IPSEC Cryptographic library.
+ * Additional information on it can be found at:
+ *    http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * James Guilford <james.guilford@intel.com>
+ * Sean Gulley <sean.m.gulley@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+#define CONCAT(a,b)	a##b
+#define VMOVDQ		vmovdqu
+
+#define xdata0		%xmm0
+#define xdata1		%xmm1
+#define xdata2		%xmm2
+#define xdata3		%xmm3
+#define xdata4		%xmm4
+#define xdata5		%xmm5
+#define xdata6		%xmm6
+#define xdata7		%xmm7
+#define xcounter	%xmm8
+#define xbyteswap	%xmm9
+#define xkey0		%xmm10
+#define xkey3		%xmm11
+#define xkey6		%xmm12
+#define xkey9		%xmm13
+#define xkey4		%xmm11
+#define xkey8		%xmm12
+#define xkey12		%xmm13
+#define xkeyA		%xmm14
+#define xkeyB		%xmm15
+
+#define p_in		%rdi
+#define p_iv		%rsi
+#define p_keys		%rdx
+#define p_out		%rcx
+#define num_bytes	%r8
+
+#define tmp		%r10
+#define	DDQ(i)		CONCAT(ddq_add_,i)
+#define	XMM(i)		CONCAT(%xmm, i)
+#define	DDQ_DATA	0
+#define	XDATA		1
+#define KEY_128		1
+#define KEY_192		2
+#define KEY_256		3
+
+.section .rodata
+.align 16
+
+byteswap_const:
+	.octa 0x000102030405060708090A0B0C0D0E0F
+ddq_add_1:
+	.octa 0x00000000000000000000000000000001
+ddq_add_2:
+	.octa 0x00000000000000000000000000000002
+ddq_add_3:
+	.octa 0x00000000000000000000000000000003
+ddq_add_4:
+	.octa 0x00000000000000000000000000000004
+ddq_add_5:
+	.octa 0x00000000000000000000000000000005
+ddq_add_6:
+	.octa 0x00000000000000000000000000000006
+ddq_add_7:
+	.octa 0x00000000000000000000000000000007
+ddq_add_8:
+	.octa 0x00000000000000000000000000000008
+
+.text
+
+/* generate a unique variable for ddq_add_x */
+
+.macro setddq n
+	var_ddq_add = DDQ(\n)
+.endm
+
+/* generate a unique variable for xmm register */
+.macro setxdata n
+	var_xdata = XMM(\n)
+.endm
+
+/* club the numeric 'id' to the symbol 'name' */
+
+.macro club name, id
+.altmacro
+	.if \name == DDQ_DATA
+		setddq %\id
+	.elseif \name == XDATA
+		setxdata %\id
+	.endif
+.noaltmacro
+.endm
+
+/*
+ * do_aes num_in_par load_keys key_len
+ * This increments p_in, but not p_out
+ */
+.macro do_aes b, k, key_len
+	.set by, \b
+	.set load_keys, \k
+	.set klen, \key_len
+
+	.if (load_keys)
+		vmovdqa	0*16(p_keys), xkey0
+	.endif
+
+	vpshufb	xbyteswap, xcounter, xdata0
+
+	.set i, 1
+	.rept (by - 1)
+		club DDQ_DATA, i
+		club XDATA, i
+		vpaddd	var_ddq_add(%rip), xcounter, var_xdata
+		vpshufb	xbyteswap, var_xdata, var_xdata
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	1*16(p_keys), xkeyA
+
+	vpxor	xkey0, xdata0, xdata0
+	club DDQ_DATA, by
+	vpaddd	var_ddq_add(%rip), xcounter, xcounter
+
+	.set i, 1
+	.rept (by - 1)
+		club XDATA, i
+		vpxor	xkey0, var_xdata, var_xdata
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	2*16(p_keys), xkeyB
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 1 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		.if (load_keys)
+			vmovdqa	3*16(p_keys), xkeyA
+		.endif
+	.else
+		vmovdqa	3*16(p_keys), xkeyA
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyB, var_xdata, var_xdata		/* key 2 */
+		.set i, (i +1)
+	.endr
+
+	add	$(16*by), p_in
+
+	.if (klen == KEY_128)
+		vmovdqa	4*16(p_keys), xkey4
+	.else
+		.if (load_keys)
+			vmovdqa	4*16(p_keys), xkey4
+		.endif
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 3 */
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	5*16(p_keys), xkeyA
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkey4, var_xdata, var_xdata		/* key 4 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		.if (load_keys)
+			vmovdqa	6*16(p_keys), xkeyB
+		.endif
+	.else
+		vmovdqa	6*16(p_keys), xkeyB
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 5 */
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	7*16(p_keys), xkeyA
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyB, var_xdata, var_xdata		/* key 6 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		vmovdqa	8*16(p_keys), xkey8
+	.else
+		.if (load_keys)
+			vmovdqa	8*16(p_keys), xkey8
+		.endif
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 7 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		.if (load_keys)
+			vmovdqa	9*16(p_keys), xkeyA
+		.endif
+	.else
+		vmovdqa	9*16(p_keys), xkeyA
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkey8, var_xdata, var_xdata		/* key 8 */
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	10*16(p_keys), xkeyB
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 9 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen != KEY_128)
+		vmovdqa	11*16(p_keys), xkeyA
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		/* key 10 */
+		.if (klen == KEY_128)
+			vaesenclast	xkeyB, var_xdata, var_xdata
+		.else
+			vaesenc	xkeyB, var_xdata, var_xdata
+		.endif
+		.set i, (i +1)
+	.endr
+
+	.if (klen != KEY_128)
+		.if (load_keys)
+			vmovdqa	12*16(p_keys), xkey12
+		.endif
+
+		.set i, 0
+		.rept by
+			club XDATA, i
+			vaesenc	xkeyA, var_xdata, var_xdata	/* key 11 */
+			.set i, (i +1)
+		.endr
+
+		.if (klen == KEY_256)
+			vmovdqa	13*16(p_keys), xkeyA
+		.endif
+
+		.set i, 0
+		.rept by
+			club XDATA, i
+			.if (klen == KEY_256)
+				/* key 12 */
+				vaesenc	xkey12, var_xdata, var_xdata
+			.else
+				vaesenclast xkey12, var_xdata, var_xdata
+			.endif
+			.set i, (i +1)
+		.endr
+
+		.if (klen == KEY_256)
+			vmovdqa	14*16(p_keys), xkeyB
+
+			.set i, 0
+			.rept by
+				club XDATA, i
+				/* key 13 */
+				vaesenc	xkeyA, var_xdata, var_xdata
+				.set i, (i +1)
+			.endr
+
+			.set i, 0
+			.rept by
+				club XDATA, i
+				/* key 14 */
+				vaesenclast	xkeyB, var_xdata, var_xdata
+				.set i, (i +1)
+			.endr
+		.endif
+	.endif
+
+	.set i, 0
+	.rept (by / 2)
+		.set j, (i+1)
+		VMOVDQ	(i*16 - 16*by)(p_in), xkeyA
+		VMOVDQ	(j*16 - 16*by)(p_in), xkeyB
+		club XDATA, i
+		vpxor	xkeyA, var_xdata, var_xdata
+		club XDATA, j
+		vpxor	xkeyB, var_xdata, var_xdata
+		.set i, (i+2)
+	.endr
+
+	.if (i < by)
+		VMOVDQ	(i*16 - 16*by)(p_in), xkeyA
+		club XDATA, i
+		vpxor	xkeyA, var_xdata, var_xdata
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		VMOVDQ	var_xdata, i*16(p_out)
+		.set i, (i+1)
+	.endr
+.endm
+
+.macro do_aes_load val, key_len
+	do_aes \val, 1, \key_len
+.endm
+
+.macro do_aes_noload val, key_len
+	do_aes \val, 0, \key_len
+.endm
+
+/* main body of aes ctr load */
+
+.macro do_aes_ctrmain key_len
+
+	cmp	$16, num_bytes
+	jb	.Ldo_return2\key_len
+
+	vmovdqa	byteswap_const(%rip), xbyteswap
+	vmovdqu	(p_iv), xcounter
+	vpshufb	xbyteswap, xcounter, xcounter
+
+	mov	num_bytes, tmp
+	and	$(7*16), tmp
+	jz	.Lmult_of_8_blks\key_len
+
+	/* 1 to 7 leftover blocks, i.e. 16 <= tmp <= 7*16 */
+	cmp	$(4*16), tmp
+	jg	.Lgt4\key_len
+	je	.Leq4\key_len
+
+.Llt4\key_len:
+	cmp	$(2*16), tmp
+	jg	.Leq3\key_len
+	je	.Leq2\key_len
+
+.Leq1\key_len:
+	do_aes_load	1, \key_len
+	add	$(1*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq2\key_len:
+	do_aes_load	2, \key_len
+	add	$(2*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+
+.Leq3\key_len:
+	do_aes_load	3, \key_len
+	add	$(3*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq4\key_len:
+	do_aes_load	4, \key_len
+	add	$(4*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Lgt4\key_len:
+	cmp	$(6*16), tmp
+	jg	.Leq7\key_len
+	je	.Leq6\key_len
+
+.Leq5\key_len:
+	do_aes_load	5, \key_len
+	add	$(5*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq6\key_len:
+	do_aes_load	6, \key_len
+	add	$(6*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq7\key_len:
+	do_aes_load	7, \key_len
+	add	$(7*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Lmult_of_8_blks\key_len:
+	.if (\key_len != KEY_128)
+		vmovdqa	0*16(p_keys), xkey0
+		vmovdqa	4*16(p_keys), xkey4
+		vmovdqa	8*16(p_keys), xkey8
+		vmovdqa	12*16(p_keys), xkey12
+	.else
+		vmovdqa	0*16(p_keys), xkey0
+		vmovdqa	3*16(p_keys), xkey4
+		vmovdqa	6*16(p_keys), xkey8
+		vmovdqa	9*16(p_keys), xkey12
+	.endif
+.align 16
+.Lmain_loop2\key_len:
+	/* num_bytes is a non-zero multiple of 8 blocks (8*16 bytes) */
+	do_aes_noload	8, \key_len
+	add	$(8*16), p_out
+	sub	$(8*16), num_bytes
+	jne	.Lmain_loop2\key_len
+
+.Ldo_return2\key_len:
+	/* return updated IV */
+	vpshufb	xbyteswap, xcounter, xcounter
+	vmovdqu	xcounter, (p_iv)
+	ret
+.endm
+
+/*
+ * routine to do AES128 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
+ *			unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_128_avx_by8)
+	/* call the aes main loop */
+	do_aes_ctrmain KEY_128
+
+ENDPROC(aes_ctr_enc_128_avx_by8)
+
+/*
+ * routine to do AES192 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
+ *			unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_192_avx_by8)
+	/* call the aes main loop */
+	do_aes_ctrmain KEY_192
+
+ENDPROC(aes_ctr_enc_192_avx_by8)
+
+/*
+ * routine to do AES256 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
+ *			unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_256_avx_by8)
+	/* call the aes main loop */
+	do_aes_ctrmain KEY_256
+
+ENDPROC(aes_ctr_enc_256_avx_by8)
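
For reference, a minimal C model of what one do_aes invocation computes: `by` counter blocks are byte-swapped, encrypted and XORed into the data, and xcounter advances by `by`. This is a sketch, not the implementation: aes_encrypt_block() below is a deliberately fake stand-in for the AES-NI round sequence (NOT real AES), and the counter is simplified to 64 bits where the assembly adds 128-bit ddq constants with vpaddd.

	#include <stdint.h>

	/* placeholder "cipher" so the model is self-contained: NOT real AES */
	static void aes_encrypt_block(const uint8_t key[16], uint8_t block[16])
	{
		for (int i = 0; i < 16; i++)
			block[i] ^= key[i];
	}

	/* model of one do_aes_load/do_aes_noload call with 'by' blocks */
	static void ctr_by_n_model(const uint8_t key[16], uint64_t *counter,
				   const uint8_t *in, uint8_t *out, int by)
	{
		for (int i = 0; i < by; i++) {
			uint8_t ks[16] = { 0 };
			uint64_t ctr = *counter + (uint64_t)i;
			int b;

			/* store the counter big-endian, as vpshufb does */
			for (b = 0; b < 8; b++)
				ks[15 - b] = (uint8_t)(ctr >> (8 * b));

			aes_encrypt_block(key, ks);	/* keystream block */

			/* XOR keystream into the data (the vpxor/VMOVDQ tail) */
			for (b = 0; b < 16; b++)
				out[i * 16 + b] = in[i * 16 + b] ^ ks[b];
		}
		*counter += (uint64_t)by;	/* final vpaddd into xcounter */
	}

A worked example of the leftover dispatch in do_aes_ctrmain: for num_bytes = 368 (23 blocks), tmp = 368 & (7*16) = 112, so the .Leq7 path encrypts 7 blocks first; num_bytes &= ~7*16 (mask ~127) then leaves 256 bytes, i.e. two 8-block passes through .Lmain_loop2.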
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 948ad0e..888950f 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -105,6 +105,9 @@
 #define AVX_GEN4_OPTSIZE 4096
 
 #ifdef CONFIG_X86_64
+
+static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
@@ -155,6 +158,12 @@
 
 
 #ifdef CONFIG_AS_AVX
+asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
+		void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
+		void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
+		void *keys, u8 *out, unsigned int num_bytes);
 /*
  * asmlinkage void aesni_gcm_precomp_avx_gen2()
  * gcm_data *my_ctx_data, context data
@@ -472,6 +481,25 @@
 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
+#ifdef CONFIG_AS_AVX
+static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv)
+{
+	/*
+	 * Based on the key length, override with the "by8" version of
+	 * CTR mode encryption/decryption for improved performance.
+	 * aes_set_key_common() ensures that the key length is one of
+	 * {128, 192, 256} bits.
+	 */
+	if (ctx->key_length == AES_KEYSIZE_128)
+		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
+	else if (ctx->key_length == AES_KEYSIZE_192)
+		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
+	else
+		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
+}
+#endif
+
 static int ctr_crypt(struct blkcipher_desc *desc,
 		     struct scatterlist *dst, struct scatterlist *src,
 		     unsigned int nbytes)
@@ -486,8 +514,8 @@
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-			      nbytes & AES_BLOCK_MASK, walk.iv);
+		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+				  nbytes & AES_BLOCK_MASK, walk.iv);
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
@@ -1493,6 +1521,14 @@
 		aesni_gcm_enc_tfm = aesni_gcm_enc;
 		aesni_gcm_dec_tfm = aesni_gcm_dec;
 	}
+	aesni_ctr_enc_tfm = aesni_ctr_enc;
+#ifdef CONFIG_AS_AVX
+	if (cpu_has_avx) {
+		/* optimize performance of ctr mode encryption transform */
+		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
+		pr_info("AES CTR mode by8 optimization enabled\n");
+	}
+#endif
 #endif
 
 	err = crypto_fpu_init();
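
The init-time hunk above uses a common pattern in the arch crypto glue: probe CPU features once at module init, point a function pointer at the best implementation, and never re-check on the hot path. A minimal sketch of the pattern; cpu_has_feature(), the implementation names and the messages are hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	typedef void (*ctr_fn)(const char *buf);

	static void ctr_generic(const char *buf) { printf("generic: %s\n", buf); }
	static void ctr_by8(const char *buf)     { printf("by8/avx: %s\n", buf); }

	/* safe default, as aesni_ctr_enc_tfm = aesni_ctr_enc above */
	static ctr_fn ctr_impl = ctr_generic;

	static bool cpu_has_feature(void) { return true; }	/* stand-in probe */

	static void mod_init(void)
	{
		if (cpu_has_feature())
			ctr_impl = ctr_by8;	/* one-time upgrade at init */
	}

	int main(void)
	{
		mod_init();
		ctr_impl("block");	/* callers never re-test the feature */
		return 0;
	}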
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index dbc4339..26d49eb 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -72,6 +72,7 @@
 
 # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
 
+.text
 ENTRY(crc_pcl)
 #define    bufp		%rdi
 #define    bufp_dw	%edi
@@ -216,15 +217,11 @@
 	## 4) Combine three results:
 	################################################################
 
-	lea	(K_table-16)(%rip), bufp	# first entry is for idx 1
+	lea	(K_table-8)(%rip), bufp		# first entry is for idx 1
 	shlq    $3, %rax			# rax *= 8
-	subq    %rax, tmp			# tmp -= rax*8
-	shlq    $1, %rax
-	subq    %rax, tmp			# tmp -= rax*16
-						# (total tmp -= rax*24)
-	addq    %rax, bufp
-
-	movdqa  (bufp), %xmm0			# 2 consts: K1:K2
+	pmovzxdq (bufp,%rax), %xmm0		# 2 consts: K1:K2
+	leal	(%eax,%eax,2), %eax		# rax *= 3 (total *24)
+	subq    %rax, tmp			# tmp -= rax*24
 
 	movq    crc_init, %xmm1			# CRC for block 1
 	PCLMULQDQ 0x00,%xmm0,%xmm1		# Multiply by K2
@@ -238,9 +235,9 @@
 	mov     crc2, crc_init
 	crc32   %rax, crc_init
 
-################################################################
-## 5) Check for end:
-################################################################
+	################################################################
+	## 5) Check for end:
+	################################################################
 
 LABEL crc_ 0
 	mov     tmp, len
@@ -331,136 +328,136 @@
 
 	################################################################
 	## PCLMULQDQ tables
-	## Table is 128 entries x 2 quad words each
+	## Table is 128 entries x two 32-bit words (8 bytes) each
 	################################################################
-.data
-.align 64
+.section	.rodata, "a", %progbits
+.align 8
 K_table:
-        .quad 0x14cd00bd6,0x105ec76f0
-        .quad 0x0ba4fc28e,0x14cd00bd6
-        .quad 0x1d82c63da,0x0f20c0dfe
-        .quad 0x09e4addf8,0x0ba4fc28e
-        .quad 0x039d3b296,0x1384aa63a
-        .quad 0x102f9b8a2,0x1d82c63da
-        .quad 0x14237f5e6,0x01c291d04
-        .quad 0x00d3b6092,0x09e4addf8
-        .quad 0x0c96cfdc0,0x0740eef02
-        .quad 0x18266e456,0x039d3b296
-        .quad 0x0daece73e,0x0083a6eec
-        .quad 0x0ab7aff2a,0x102f9b8a2
-        .quad 0x1248ea574,0x1c1733996
-        .quad 0x083348832,0x14237f5e6
-        .quad 0x12c743124,0x02ad91c30
-        .quad 0x0b9e02b86,0x00d3b6092
-        .quad 0x018b33a4e,0x06992cea2
-        .quad 0x1b331e26a,0x0c96cfdc0
-        .quad 0x17d35ba46,0x07e908048
-        .quad 0x1bf2e8b8a,0x18266e456
-        .quad 0x1a3e0968a,0x11ed1f9d8
-        .quad 0x0ce7f39f4,0x0daece73e
-        .quad 0x061d82e56,0x0f1d0f55e
-        .quad 0x0d270f1a2,0x0ab7aff2a
-        .quad 0x1c3f5f66c,0x0a87ab8a8
-        .quad 0x12ed0daac,0x1248ea574
-        .quad 0x065863b64,0x08462d800
-        .quad 0x11eef4f8e,0x083348832
-        .quad 0x1ee54f54c,0x071d111a8
-        .quad 0x0b3e32c28,0x12c743124
-        .quad 0x0064f7f26,0x0ffd852c6
-        .quad 0x0dd7e3b0c,0x0b9e02b86
-        .quad 0x0f285651c,0x0dcb17aa4
-        .quad 0x010746f3c,0x018b33a4e
-        .quad 0x1c24afea4,0x0f37c5aee
-        .quad 0x0271d9844,0x1b331e26a
-        .quad 0x08e766a0c,0x06051d5a2
-        .quad 0x093a5f730,0x17d35ba46
-        .quad 0x06cb08e5c,0x11d5ca20e
-        .quad 0x06b749fb2,0x1bf2e8b8a
-        .quad 0x1167f94f2,0x021f3d99c
-        .quad 0x0cec3662e,0x1a3e0968a
-        .quad 0x19329634a,0x08f158014
-        .quad 0x0e6fc4e6a,0x0ce7f39f4
-        .quad 0x08227bb8a,0x1a5e82106
-        .quad 0x0b0cd4768,0x061d82e56
-        .quad 0x13c2b89c4,0x188815ab2
-        .quad 0x0d7a4825c,0x0d270f1a2
-        .quad 0x10f5ff2ba,0x105405f3e
-        .quad 0x00167d312,0x1c3f5f66c
-        .quad 0x0f6076544,0x0e9adf796
-        .quad 0x026f6a60a,0x12ed0daac
-        .quad 0x1a2adb74e,0x096638b34
-        .quad 0x19d34af3a,0x065863b64
-        .quad 0x049c3cc9c,0x1e50585a0
-        .quad 0x068bce87a,0x11eef4f8e
-        .quad 0x1524fa6c6,0x19f1c69dc
-        .quad 0x16cba8aca,0x1ee54f54c
-        .quad 0x042d98888,0x12913343e
-        .quad 0x1329d9f7e,0x0b3e32c28
-        .quad 0x1b1c69528,0x088f25a3a
-        .quad 0x02178513a,0x0064f7f26
-        .quad 0x0e0ac139e,0x04e36f0b0
-        .quad 0x0170076fa,0x0dd7e3b0c
-        .quad 0x141a1a2e2,0x0bd6f81f8
-        .quad 0x16ad828b4,0x0f285651c
-        .quad 0x041d17b64,0x19425cbba
-        .quad 0x1fae1cc66,0x010746f3c
-        .quad 0x1a75b4b00,0x18db37e8a
-        .quad 0x0f872e54c,0x1c24afea4
-        .quad 0x01e41e9fc,0x04c144932
-        .quad 0x086d8e4d2,0x0271d9844
-        .quad 0x160f7af7a,0x052148f02
-        .quad 0x05bb8f1bc,0x08e766a0c
-        .quad 0x0a90fd27a,0x0a3c6f37a
-        .quad 0x0b3af077a,0x093a5f730
-        .quad 0x04984d782,0x1d22c238e
-        .quad 0x0ca6ef3ac,0x06cb08e5c
-        .quad 0x0234e0b26,0x063ded06a
-        .quad 0x1d88abd4a,0x06b749fb2
-        .quad 0x04597456a,0x04d56973c
-        .quad 0x0e9e28eb4,0x1167f94f2
-        .quad 0x07b3ff57a,0x19385bf2e
-        .quad 0x0c9c8b782,0x0cec3662e
-        .quad 0x13a9cba9e,0x0e417f38a
-        .quad 0x093e106a4,0x19329634a
-        .quad 0x167001a9c,0x14e727980
-        .quad 0x1ddffc5d4,0x0e6fc4e6a
-        .quad 0x00df04680,0x0d104b8fc
-        .quad 0x02342001e,0x08227bb8a
-        .quad 0x00a2a8d7e,0x05b397730
-        .quad 0x168763fa6,0x0b0cd4768
-        .quad 0x1ed5a407a,0x0e78eb416
-        .quad 0x0d2c3ed1a,0x13c2b89c4
-        .quad 0x0995a5724,0x1641378f0
-        .quad 0x19b1afbc4,0x0d7a4825c
-        .quad 0x109ffedc0,0x08d96551c
-        .quad 0x0f2271e60,0x10f5ff2ba
-        .quad 0x00b0bf8ca,0x00bf80dd2
-        .quad 0x123888b7a,0x00167d312
-        .quad 0x1e888f7dc,0x18dcddd1c
-        .quad 0x002ee03b2,0x0f6076544
-        .quad 0x183e8d8fe,0x06a45d2b2
-        .quad 0x133d7a042,0x026f6a60a
-        .quad 0x116b0f50c,0x1dd3e10e8
-        .quad 0x05fabe670,0x1a2adb74e
-        .quad 0x130004488,0x0de87806c
-        .quad 0x000bcf5f6,0x19d34af3a
-        .quad 0x18f0c7078,0x014338754
-        .quad 0x017f27698,0x049c3cc9c
-        .quad 0x058ca5f00,0x15e3e77ee
-        .quad 0x1af900c24,0x068bce87a
-        .quad 0x0b5cfca28,0x0dd07448e
-        .quad 0x0ded288f8,0x1524fa6c6
-        .quad 0x059f229bc,0x1d8048348
-        .quad 0x06d390dec,0x16cba8aca
-        .quad 0x037170390,0x0a3e3e02c
-        .quad 0x06353c1cc,0x042d98888
-        .quad 0x0c4584f5c,0x0d73c7bea
-        .quad 0x1f16a3418,0x1329d9f7e
-        .quad 0x0531377e2,0x185137662
-        .quad 0x1d8d9ca7c,0x1b1c69528
-        .quad 0x0b25b29f2,0x18a08b5bc
-        .quad 0x19fb2a8b0,0x02178513a
-        .quad 0x1a08fe6ac,0x1da758ae0
-        .quad 0x045cddf4e,0x0e0ac139e
-        .quad 0x1a91647f2,0x169cf9eb0
-        .quad 0x1a0f717c4,0x0170076fa
+	.long 0x493c7d27, 0x00000001
+	.long 0xba4fc28e, 0x493c7d27
+	.long 0xddc0152b, 0xf20c0dfe
+	.long 0x9e4addf8, 0xba4fc28e
+	.long 0x39d3b296, 0x3da6d0cb
+	.long 0x0715ce53, 0xddc0152b
+	.long 0x47db8317, 0x1c291d04
+	.long 0x0d3b6092, 0x9e4addf8
+	.long 0xc96cfdc0, 0x740eef02
+	.long 0x878a92a7, 0x39d3b296
+	.long 0xdaece73e, 0x083a6eec
+	.long 0xab7aff2a, 0x0715ce53
+	.long 0x2162d385, 0xc49f4f67
+	.long 0x83348832, 0x47db8317
+	.long 0x299847d5, 0x2ad91c30
+	.long 0xb9e02b86, 0x0d3b6092
+	.long 0x18b33a4e, 0x6992cea2
+	.long 0xb6dd949b, 0xc96cfdc0
+	.long 0x78d9ccb7, 0x7e908048
+	.long 0xbac2fd7b, 0x878a92a7
+	.long 0xa60ce07b, 0x1b3d8f29
+	.long 0xce7f39f4, 0xdaece73e
+	.long 0x61d82e56, 0xf1d0f55e
+	.long 0xd270f1a2, 0xab7aff2a
+	.long 0xc619809d, 0xa87ab8a8
+	.long 0x2b3cac5d, 0x2162d385
+	.long 0x65863b64, 0x8462d800
+	.long 0x1b03397f, 0x83348832
+	.long 0xebb883bd, 0x71d111a8
+	.long 0xb3e32c28, 0x299847d5
+	.long 0x064f7f26, 0xffd852c6
+	.long 0xdd7e3b0c, 0xb9e02b86
+	.long 0xf285651c, 0xdcb17aa4
+	.long 0x10746f3c, 0x18b33a4e
+	.long 0xc7a68855, 0xf37c5aee
+	.long 0x271d9844, 0xb6dd949b
+	.long 0x8e766a0c, 0x6051d5a2
+	.long 0x93a5f730, 0x78d9ccb7
+	.long 0x6cb08e5c, 0x18b0d4ff
+	.long 0x6b749fb2, 0xbac2fd7b
+	.long 0x1393e203, 0x21f3d99c
+	.long 0xcec3662e, 0xa60ce07b
+	.long 0x96c515bb, 0x8f158014
+	.long 0xe6fc4e6a, 0xce7f39f4
+	.long 0x8227bb8a, 0xa00457f7
+	.long 0xb0cd4768, 0x61d82e56
+	.long 0x39c7ff35, 0x8d6d2c43
+	.long 0xd7a4825c, 0xd270f1a2
+	.long 0x0ab3844b, 0x00ac29cf
+	.long 0x0167d312, 0xc619809d
+	.long 0xf6076544, 0xe9adf796
+	.long 0x26f6a60a, 0x2b3cac5d
+	.long 0xa741c1bf, 0x96638b34
+	.long 0x98d8d9cb, 0x65863b64
+	.long 0x49c3cc9c, 0xe0e9f351
+	.long 0x68bce87a, 0x1b03397f
+	.long 0x57a3d037, 0x9af01f2d
+	.long 0x6956fc3b, 0xebb883bd
+	.long 0x42d98888, 0x2cff42cf
+	.long 0x3771e98f, 0xb3e32c28
+	.long 0xb42ae3d9, 0x88f25a3a
+	.long 0x2178513a, 0x064f7f26
+	.long 0xe0ac139e, 0x4e36f0b0
+	.long 0x170076fa, 0xdd7e3b0c
+	.long 0x444dd413, 0xbd6f81f8
+	.long 0x6f345e45, 0xf285651c
+	.long 0x41d17b64, 0x91c9bd4b
+	.long 0xff0dba97, 0x10746f3c
+	.long 0xa2b73df1, 0x885f087b
+	.long 0xf872e54c, 0xc7a68855
+	.long 0x1e41e9fc, 0x4c144932
+	.long 0x86d8e4d2, 0x271d9844
+	.long 0x651bd98b, 0x52148f02
+	.long 0x5bb8f1bc, 0x8e766a0c
+	.long 0xa90fd27a, 0xa3c6f37a
+	.long 0xb3af077a, 0x93a5f730
+	.long 0x4984d782, 0xd7c0557f
+	.long 0xca6ef3ac, 0x6cb08e5c
+	.long 0x234e0b26, 0x63ded06a
+	.long 0xdd66cbbb, 0x6b749fb2
+	.long 0x4597456a, 0x4d56973c
+	.long 0xe9e28eb4, 0x1393e203
+	.long 0x7b3ff57a, 0x9669c9df
+	.long 0xc9c8b782, 0xcec3662e
+	.long 0x3f70cc6f, 0xe417f38a
+	.long 0x93e106a4, 0x96c515bb
+	.long 0x62ec6c6d, 0x4b9e0f71
+	.long 0xd813b325, 0xe6fc4e6a
+	.long 0x0df04680, 0xd104b8fc
+	.long 0x2342001e, 0x8227bb8a
+	.long 0x0a2a8d7e, 0x5b397730
+	.long 0x6d9a4957, 0xb0cd4768
+	.long 0xe8b6368b, 0xe78eb416
+	.long 0xd2c3ed1a, 0x39c7ff35
+	.long 0x995a5724, 0x61ff0e01
+	.long 0x9ef68d35, 0xd7a4825c
+	.long 0x0c139b31, 0x8d96551c
+	.long 0xf2271e60, 0x0ab3844b
+	.long 0x0b0bf8ca, 0x0bf80dd2
+	.long 0x2664fd8b, 0x0167d312
+	.long 0xed64812d, 0x8821abed
+	.long 0x02ee03b2, 0xf6076544
+	.long 0x8604ae0f, 0x6a45d2b2
+	.long 0x363bd6b3, 0x26f6a60a
+	.long 0x135c83fd, 0xd8d26619
+	.long 0x5fabe670, 0xa741c1bf
+	.long 0x35ec3279, 0xde87806c
+	.long 0x00bcf5f6, 0x98d8d9cb
+	.long 0x8ae00689, 0x14338754
+	.long 0x17f27698, 0x49c3cc9c
+	.long 0x58ca5f00, 0x5bd2011f
+	.long 0xaa7c7ad5, 0x68bce87a
+	.long 0xb5cfca28, 0xdd07448e
+	.long 0xded288f8, 0x57a3d037
+	.long 0x59f229bc, 0xdde8f5b9
+	.long 0x6d390dec, 0x6956fc3b
+	.long 0x37170390, 0xa3e3e02c
+	.long 0x6353c1cc, 0x42d98888
+	.long 0xc4584f5c, 0xd73c7bea
+	.long 0xf48642e9, 0x3771e98f
+	.long 0x531377e2, 0x80ff0093
+	.long 0xdd35bc8d, 0xb42ae3d9
+	.long 0xb25b29f2, 0x8fe4c34d
+	.long 0x9a5ede41, 0x2178513a
+	.long 0xa563905d, 0xdf99fc11
+	.long 0x45cddf4e, 0xe0ac139e
+	.long 0xacfa3103, 0x6c23e841
+	.long 0xa51b6135, 0x170076fa
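
The rework above halves K_table: each entry now stores two 32-bit constants (regenerated to fit 32 bits) instead of two 64-bit ones, and "pmovzxdq (bufp,%rax), %xmm0" zero-extends a pair back into two quadwords at load time, with bufp pre-biased by -8 so entry idx sits at K_table + (idx-1)*8. A C model of that load, assuming the little-endian layout of x86:

	#include <stdint.h>

	/* one 8-byte K_table entry: two 32-bit constants */
	struct k_entry { uint32_t c[2]; };

	/* what pmovzxdq yields for a given idx (first entry is for idx 1) */
	static void load_consts(const struct k_entry *k_table, unsigned int idx,
				uint64_t *lo_qword, uint64_t *hi_qword)
	{
		const struct k_entry *e = &k_table[idx - 1];

		*lo_qword = (uint64_t)e->c[0];	/* zero-extend low dword */
		*hi_qword = (uint64_t)e->c[1];	/* zero-extend high dword */
	}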
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
new file mode 100644
index 0000000..038f6ae
--- /dev/null
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -0,0 +1,805 @@
+/*
+ * des3_ede-asm_64.S  -  x86-64 assembly implementation of 3DES cipher
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+.file "des3_ede-asm_64.S"
+.text
+
+#define s1 .L_s1
+#define s2 ((s1) + (64*8))
+#define s3 ((s2) + (64*8))
+#define s4 ((s3) + (64*8))
+#define s5 ((s4) + (64*8))
+#define s6 ((s5) + (64*8))
+#define s7 ((s6) + (64*8))
+#define s8 ((s7) + (64*8))
+
+/* register macros */
+#define CTX %rdi
+
+#define RL0 %r8
+#define RL1 %r9
+#define RL2 %r10
+
+#define RL0d %r8d
+#define RL1d %r9d
+#define RL2d %r10d
+
+#define RR0 %r11
+#define RR1 %r12
+#define RR2 %r13
+
+#define RR0d %r11d
+#define RR1d %r12d
+#define RR2d %r13d
+
+#define RW0 %rax
+#define RW1 %rbx
+#define RW2 %rcx
+
+#define RW0d %eax
+#define RW1d %ebx
+#define RW2d %ecx
+
+#define RW0bl %al
+#define RW1bl %bl
+#define RW2bl %cl
+
+#define RW0bh %ah
+#define RW1bh %bh
+#define RW2bh %ch
+
+#define RT0 %r15
+#define RT1 %rbp
+#define RT2 %r14
+#define RT3 %rdx
+
+#define RT0d %r15d
+#define RT1d %ebp
+#define RT2d %r14d
+#define RT3d %edx
+
+/***********************************************************************
+ * 1-way 3DES
+ ***********************************************************************/
+#define do_permutation(a, b, offset, mask) \
+	movl a, RT0d; \
+	shrl $(offset), RT0d; \
+	xorl b, RT0d; \
+	andl $(mask), RT0d; \
+	xorl RT0d, b; \
+	shll $(offset), RT0d; \
+	xorl RT0d, a;
+
+#define expand_to_64bits(val, mask) \
+	movl val##d, RT0d; \
+	rorl $4, RT0d; \
+	shlq $32, RT0; \
+	orq RT0, val; \
+	andq mask, val;
+
+#define compress_to_64bits(val) \
+	movq val, RT0; \
+	shrq $32, RT0; \
+	roll $4, RT0d; \
+	orl RT0d, val##d;
+
+#define initial_permutation(left, right) \
+	do_permutation(left##d, right##d,  4, 0x0f0f0f0f); \
+	do_permutation(left##d, right##d, 16, 0x0000ffff); \
+	do_permutation(right##d, left##d,  2, 0x33333333); \
+	do_permutation(right##d, left##d,  8, 0x00ff00ff); \
+	movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+	movl left##d, RW0d; \
+	roll $1, right##d; \
+	xorl right##d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, left##d; \
+	xorl RW0d, right##d; \
+	roll $1, left##d; \
+	expand_to_64bits(right, RT3); \
+	expand_to_64bits(left, RT3);
+
+#define final_permutation(left, right) \
+	compress_to_64bits(right); \
+	compress_to_64bits(left); \
+	movl right##d, RW0d; \
+	rorl $1, left##d; \
+	xorl left##d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, right##d; \
+	xorl RW0d, left##d; \
+	rorl $1, right##d; \
+	do_permutation(right##d, left##d,  8, 0x00ff00ff); \
+	do_permutation(right##d, left##d,  2, 0x33333333); \
+	do_permutation(left##d, right##d, 16, 0x0000ffff); \
+	do_permutation(left##d, right##d,  4, 0x0f0f0f0f);
+
+#define round1(n, from, to, load_next_key) \
+	xorq from, RW0; \
+	\
+	movzbl RW0bl, RT0d; \
+	movzbl RW0bh, RT1d; \
+	shrq $16, RW0; \
+	movzbl RW0bl, RT2d; \
+	movzbl RW0bh, RT3d; \
+	shrq $16, RW0; \
+	movq s8(, RT0, 8), RT0; \
+	xorq s6(, RT1, 8), to; \
+	movzbl RW0bl, RL1d; \
+	movzbl RW0bh, RT1d; \
+	shrl $16, RW0d; \
+	xorq s4(, RT2, 8), RT0; \
+	xorq s2(, RT3, 8), to; \
+	movzbl RW0bl, RT2d; \
+	movzbl RW0bh, RT3d; \
+	xorq s7(, RL1, 8), RT0; \
+	xorq s5(, RT1, 8), to; \
+	xorq s3(, RT2, 8), RT0; \
+	load_next_key(n, RW0); \
+	xorq RT0, to; \
+	xorq s1(, RT3, 8), to; \
+
+#define load_next_key(n, RWx) \
+	movq (((n) + 1) * 8)(CTX), RWx;
+
+#define dummy2(a, b) /*_*/
+
+#define read_block(io, left, right) \
+	movl    (io), left##d; \
+	movl   4(io), right##d; \
+	bswapl left##d; \
+	bswapl right##d;
+
+#define write_block(io, left, right) \
+	bswapl left##d; \
+	bswapl right##d; \
+	movl   left##d,   (io); \
+	movl   right##d, 4(io);
+
+ENTRY(des3_ede_x86_64_crypt_blk)
+	/* input:
+	 *	%rdi: round keys, CTX
+	 *	%rsi: dst
+	 *	%rdx: src
+	 */
+	pushq %rbp;
+	pushq %rbx;
+	pushq %r12;
+	pushq %r13;
+	pushq %r14;
+	pushq %r15;
+
+	read_block(%rdx, RL0, RR0);
+	initial_permutation(RL0, RR0);
+
+	movq (CTX), RW0;
+
+	round1(0, RR0, RL0, load_next_key);
+	round1(1, RL0, RR0, load_next_key);
+	round1(2, RR0, RL0, load_next_key);
+	round1(3, RL0, RR0, load_next_key);
+	round1(4, RR0, RL0, load_next_key);
+	round1(5, RL0, RR0, load_next_key);
+	round1(6, RR0, RL0, load_next_key);
+	round1(7, RL0, RR0, load_next_key);
+	round1(8, RR0, RL0, load_next_key);
+	round1(9, RL0, RR0, load_next_key);
+	round1(10, RR0, RL0, load_next_key);
+	round1(11, RL0, RR0, load_next_key);
+	round1(12, RR0, RL0, load_next_key);
+	round1(13, RL0, RR0, load_next_key);
+	round1(14, RR0, RL0, load_next_key);
+	round1(15, RL0, RR0, load_next_key);
+
+	round1(16+0, RL0, RR0, load_next_key);
+	round1(16+1, RR0, RL0, load_next_key);
+	round1(16+2, RL0, RR0, load_next_key);
+	round1(16+3, RR0, RL0, load_next_key);
+	round1(16+4, RL0, RR0, load_next_key);
+	round1(16+5, RR0, RL0, load_next_key);
+	round1(16+6, RL0, RR0, load_next_key);
+	round1(16+7, RR0, RL0, load_next_key);
+	round1(16+8, RL0, RR0, load_next_key);
+	round1(16+9, RR0, RL0, load_next_key);
+	round1(16+10, RL0, RR0, load_next_key);
+	round1(16+11, RR0, RL0, load_next_key);
+	round1(16+12, RL0, RR0, load_next_key);
+	round1(16+13, RR0, RL0, load_next_key);
+	round1(16+14, RL0, RR0, load_next_key);
+	round1(16+15, RR0, RL0, load_next_key);
+
+	round1(32+0, RR0, RL0, load_next_key);
+	round1(32+1, RL0, RR0, load_next_key);
+	round1(32+2, RR0, RL0, load_next_key);
+	round1(32+3, RL0, RR0, load_next_key);
+	round1(32+4, RR0, RL0, load_next_key);
+	round1(32+5, RL0, RR0, load_next_key);
+	round1(32+6, RR0, RL0, load_next_key);
+	round1(32+7, RL0, RR0, load_next_key);
+	round1(32+8, RR0, RL0, load_next_key);
+	round1(32+9, RL0, RR0, load_next_key);
+	round1(32+10, RR0, RL0, load_next_key);
+	round1(32+11, RL0, RR0, load_next_key);
+	round1(32+12, RR0, RL0, load_next_key);
+	round1(32+13, RL0, RR0, load_next_key);
+	round1(32+14, RR0, RL0, load_next_key);
+	round1(32+15, RL0, RR0, dummy2);
+
+	final_permutation(RR0, RL0);
+	write_block(%rsi, RR0, RL0);
+
+	popq %r15;
+	popq %r14;
+	popq %r13;
+	popq %r12;
+	popq %rbx;
+	popq %rbp;
+
+	ret;
+ENDPROC(des3_ede_x86_64_crypt_blk)
+
+/***********************************************************************
+ * 3-way 3DES
+ ***********************************************************************/
+#define expand_to_64bits(val, mask) \
+	movl val##d, RT0d; \
+	rorl $4, RT0d; \
+	shlq $32, RT0; \
+	orq RT0, val; \
+	andq mask, val;
+
+#define compress_to_64bits(val) \
+	movq val, RT0; \
+	shrq $32, RT0; \
+	roll $4, RT0d; \
+	orl RT0d, val##d;
+
+#define initial_permutation3(left, right) \
+	do_permutation(left##0d, right##0d,  4, 0x0f0f0f0f); \
+	do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+	  do_permutation(left##1d, right##1d,  4, 0x0f0f0f0f); \
+	  do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+	    do_permutation(left##2d, right##2d,  4, 0x0f0f0f0f); \
+	    do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+	    \
+	do_permutation(right##0d, left##0d,  2, 0x33333333); \
+	do_permutation(right##0d, left##0d,  8, 0x00ff00ff); \
+	  do_permutation(right##1d, left##1d,  2, 0x33333333); \
+	  do_permutation(right##1d, left##1d,  8, 0x00ff00ff); \
+	    do_permutation(right##2d, left##2d,  2, 0x33333333); \
+	    do_permutation(right##2d, left##2d,  8, 0x00ff00ff); \
+	    \
+	movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+	    \
+	movl left##0d, RW0d; \
+	roll $1, right##0d; \
+	xorl right##0d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, left##0d; \
+	xorl RW0d, right##0d; \
+	roll $1, left##0d; \
+	expand_to_64bits(right##0, RT3); \
+	expand_to_64bits(left##0, RT3); \
+	  movl left##1d, RW1d; \
+	  roll $1, right##1d; \
+	  xorl right##1d, RW1d; \
+	  andl $0xaaaaaaaa, RW1d; \
+	  xorl RW1d, left##1d; \
+	  xorl RW1d, right##1d; \
+	  roll $1, left##1d; \
+	  expand_to_64bits(right##1, RT3); \
+	  expand_to_64bits(left##1, RT3); \
+	    movl left##2d, RW2d; \
+	    roll $1, right##2d; \
+	    xorl right##2d, RW2d; \
+	    andl $0xaaaaaaaa, RW2d; \
+	    xorl RW2d, left##2d; \
+	    xorl RW2d, right##2d; \
+	    roll $1, left##2d; \
+	    expand_to_64bits(right##2, RT3); \
+	    expand_to_64bits(left##2, RT3);
+
+#define final_permutation3(left, right) \
+	compress_to_64bits(right##0); \
+	compress_to_64bits(left##0); \
+	movl right##0d, RW0d; \
+	rorl $1, left##0d; \
+	xorl left##0d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, right##0d; \
+	xorl RW0d, left##0d; \
+	rorl $1, right##0d; \
+	  compress_to_64bits(right##1); \
+	  compress_to_64bits(left##1); \
+	  movl right##1d, RW1d; \
+	  rorl $1, left##1d; \
+	  xorl left##1d, RW1d; \
+	  andl $0xaaaaaaaa, RW1d; \
+	  xorl RW1d, right##1d; \
+	  xorl RW1d, left##1d; \
+	  rorl $1, right##1d; \
+	    compress_to_64bits(right##2); \
+	    compress_to_64bits(left##2); \
+	    movl right##2d, RW2d; \
+	    rorl $1, left##2d; \
+	    xorl left##2d, RW2d; \
+	    andl $0xaaaaaaaa, RW2d; \
+	    xorl RW2d, right##2d; \
+	    xorl RW2d, left##2d; \
+	    rorl $1, right##2d; \
+	    \
+	do_permutation(right##0d, left##0d,  8, 0x00ff00ff); \
+	do_permutation(right##0d, left##0d,  2, 0x33333333); \
+	  do_permutation(right##1d, left##1d,  8, 0x00ff00ff); \
+	  do_permutation(right##1d, left##1d,  2, 0x33333333); \
+	    do_permutation(right##2d, left##2d,  8, 0x00ff00ff); \
+	    do_permutation(right##2d, left##2d,  2, 0x33333333); \
+	    \
+	do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+	do_permutation(left##0d, right##0d,  4, 0x0f0f0f0f); \
+	  do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+	  do_permutation(left##1d, right##1d,  4, 0x0f0f0f0f); \
+	    do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+	    do_permutation(left##2d, right##2d,  4, 0x0f0f0f0f);
+
+#define round3(n, from, to, load_next_key, do_movq) \
+	xorq from##0, RW0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	shrq $16, RW0; \
+	xorq s8(, RT3, 8), to##0; \
+	xorq s6(, RT1, 8), to##0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	shrq $16, RW0; \
+	xorq s4(, RT3, 8), to##0; \
+	xorq s2(, RT1, 8), to##0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	shrl $16, RW0d; \
+	xorq s7(, RT3, 8), to##0; \
+	xorq s5(, RT1, 8), to##0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	load_next_key(n, RW0); \
+	xorq s3(, RT3, 8), to##0; \
+	xorq s1(, RT1, 8), to##0; \
+		xorq from##1, RW1; \
+		movzbl RW1bl, RT3d; \
+		movzbl RW1bh, RT1d; \
+		shrq $16, RW1; \
+		xorq s8(, RT3, 8), to##1; \
+		xorq s6(, RT1, 8), to##1; \
+		movzbl RW1bl, RT3d; \
+		movzbl RW1bh, RT1d; \
+		shrq $16, RW1; \
+		xorq s4(, RT3, 8), to##1; \
+		xorq s2(, RT1, 8), to##1; \
+		movzbl RW1bl, RT3d; \
+		movzbl RW1bh, RT1d; \
+		shrl $16, RW1d; \
+		xorq s7(, RT3, 8), to##1; \
+		xorq s5(, RT1, 8), to##1; \
+		movzbl RW1bl, RT3d; \
+		movzbl RW1bh, RT1d; \
+		do_movq(RW0, RW1); \
+		xorq s3(, RT3, 8), to##1; \
+		xorq s1(, RT1, 8), to##1; \
+			xorq from##2, RW2; \
+			movzbl RW2bl, RT3d; \
+			movzbl RW2bh, RT1d; \
+			shrq $16, RW2; \
+			xorq s8(, RT3, 8), to##2; \
+			xorq s6(, RT1, 8), to##2; \
+			movzbl RW2bl, RT3d; \
+			movzbl RW2bh, RT1d; \
+			shrq $16, RW2; \
+			xorq s4(, RT3, 8), to##2; \
+			xorq s2(, RT1, 8), to##2; \
+			movzbl RW2bl, RT3d; \
+			movzbl RW2bh, RT1d; \
+			shrl $16, RW2d; \
+			xorq s7(, RT3, 8), to##2; \
+			xorq s5(, RT1, 8), to##2; \
+			movzbl RW2bl, RT3d; \
+			movzbl RW2bh, RT1d; \
+			do_movq(RW0, RW2); \
+			xorq s3(, RT3, 8), to##2; \
+			xorq s1(, RT1, 8), to##2;
+
+#define __movq(src, dst) \
+	movq src, dst;
+
+ENTRY(des3_ede_x86_64_crypt_blk_3way)
+	/* input:
+	 *	%rdi: ctx, round keys
+	 *	%rsi: dst (3 blocks)
+	 *	%rdx: src (3 blocks)
+	 */
+
+	pushq %rbp;
+	pushq %rbx;
+	pushq %r12;
+	pushq %r13;
+	pushq %r14;
+	pushq %r15;
+
+	/* load input */
+	movl 0 * 4(%rdx), RL0d;
+	movl 1 * 4(%rdx), RR0d;
+	movl 2 * 4(%rdx), RL1d;
+	movl 3 * 4(%rdx), RR1d;
+	movl 4 * 4(%rdx), RL2d;
+	movl 5 * 4(%rdx), RR2d;
+
+	bswapl RL0d;
+	bswapl RR0d;
+	bswapl RL1d;
+	bswapl RR1d;
+	bswapl RL2d;
+	bswapl RR2d;
+
+	initial_permutation3(RL, RR);
+
+	movq 0(CTX), RW0;
+	movq RW0, RW1;
+	movq RW0, RW2;
+
+	round3(0, RR, RL, load_next_key, __movq);
+	round3(1, RL, RR, load_next_key, __movq);
+	round3(2, RR, RL, load_next_key, __movq);
+	round3(3, RL, RR, load_next_key, __movq);
+	round3(4, RR, RL, load_next_key, __movq);
+	round3(5, RL, RR, load_next_key, __movq);
+	round3(6, RR, RL, load_next_key, __movq);
+	round3(7, RL, RR, load_next_key, __movq);
+	round3(8, RR, RL, load_next_key, __movq);
+	round3(9, RL, RR, load_next_key, __movq);
+	round3(10, RR, RL, load_next_key, __movq);
+	round3(11, RL, RR, load_next_key, __movq);
+	round3(12, RR, RL, load_next_key, __movq);
+	round3(13, RL, RR, load_next_key, __movq);
+	round3(14, RR, RL, load_next_key, __movq);
+	round3(15, RL, RR, load_next_key, __movq);
+
+	round3(16+0, RL, RR, load_next_key, __movq);
+	round3(16+1, RR, RL, load_next_key, __movq);
+	round3(16+2, RL, RR, load_next_key, __movq);
+	round3(16+3, RR, RL, load_next_key, __movq);
+	round3(16+4, RL, RR, load_next_key, __movq);
+	round3(16+5, RR, RL, load_next_key, __movq);
+	round3(16+6, RL, RR, load_next_key, __movq);
+	round3(16+7, RR, RL, load_next_key, __movq);
+	round3(16+8, RL, RR, load_next_key, __movq);
+	round3(16+9, RR, RL, load_next_key, __movq);
+	round3(16+10, RL, RR, load_next_key, __movq);
+	round3(16+11, RR, RL, load_next_key, __movq);
+	round3(16+12, RL, RR, load_next_key, __movq);
+	round3(16+13, RR, RL, load_next_key, __movq);
+	round3(16+14, RL, RR, load_next_key, __movq);
+	round3(16+15, RR, RL, load_next_key, __movq);
+
+	round3(32+0, RR, RL, load_next_key, __movq);
+	round3(32+1, RL, RR, load_next_key, __movq);
+	round3(32+2, RR, RL, load_next_key, __movq);
+	round3(32+3, RL, RR, load_next_key, __movq);
+	round3(32+4, RR, RL, load_next_key, __movq);
+	round3(32+5, RL, RR, load_next_key, __movq);
+	round3(32+6, RR, RL, load_next_key, __movq);
+	round3(32+7, RL, RR, load_next_key, __movq);
+	round3(32+8, RR, RL, load_next_key, __movq);
+	round3(32+9, RL, RR, load_next_key, __movq);
+	round3(32+10, RR, RL, load_next_key, __movq);
+	round3(32+11, RL, RR, load_next_key, __movq);
+	round3(32+12, RR, RL, load_next_key, __movq);
+	round3(32+13, RL, RR, load_next_key, __movq);
+	round3(32+14, RR, RL, load_next_key, __movq);
+	round3(32+15, RL, RR, dummy2, dummy2);
+
+	final_permutation3(RR, RL);
+
+	bswapl RR0d;
+	bswapl RL0d;
+	bswapl RR1d;
+	bswapl RL1d;
+	bswapl RR2d;
+	bswapl RL2d;
+
+	movl RR0d, 0 * 4(%rsi);
+	movl RL0d, 1 * 4(%rsi);
+	movl RR1d, 2 * 4(%rsi);
+	movl RL1d, 3 * 4(%rsi);
+	movl RR2d, 4 * 4(%rsi);
+	movl RL2d, 5 * 4(%rsi);
+
+	popq %r15;
+	popq %r14;
+	popq %r13;
+	popq %r12;
+	popq %rbx;
+	popq %rbp;
+
+	ret;
+ENDPROC(des3_ede_x86_64_crypt_blk_3way)
+
+.data
+.align 16
+.L_s1:
+	.quad 0x0010100001010400, 0x0000000000000000
+	.quad 0x0000100000010000, 0x0010100001010404
+	.quad 0x0010100001010004, 0x0000100000010404
+	.quad 0x0000000000000004, 0x0000100000010000
+	.quad 0x0000000000000400, 0x0010100001010400
+	.quad 0x0010100001010404, 0x0000000000000400
+	.quad 0x0010000001000404, 0x0010100001010004
+	.quad 0x0010000001000000, 0x0000000000000004
+	.quad 0x0000000000000404, 0x0010000001000400
+	.quad 0x0010000001000400, 0x0000100000010400
+	.quad 0x0000100000010400, 0x0010100001010000
+	.quad 0x0010100001010000, 0x0010000001000404
+	.quad 0x0000100000010004, 0x0010000001000004
+	.quad 0x0010000001000004, 0x0000100000010004
+	.quad 0x0000000000000000, 0x0000000000000404
+	.quad 0x0000100000010404, 0x0010000001000000
+	.quad 0x0000100000010000, 0x0010100001010404
+	.quad 0x0000000000000004, 0x0010100001010000
+	.quad 0x0010100001010400, 0x0010000001000000
+	.quad 0x0010000001000000, 0x0000000000000400
+	.quad 0x0010100001010004, 0x0000100000010000
+	.quad 0x0000100000010400, 0x0010000001000004
+	.quad 0x0000000000000400, 0x0000000000000004
+	.quad 0x0010000001000404, 0x0000100000010404
+	.quad 0x0010100001010404, 0x0000100000010004
+	.quad 0x0010100001010000, 0x0010000001000404
+	.quad 0x0010000001000004, 0x0000000000000404
+	.quad 0x0000100000010404, 0x0010100001010400
+	.quad 0x0000000000000404, 0x0010000001000400
+	.quad 0x0010000001000400, 0x0000000000000000
+	.quad 0x0000100000010004, 0x0000100000010400
+	.quad 0x0000000000000000, 0x0010100001010004
+.L_s2:
+	.quad 0x0801080200100020, 0x0800080000000000
+	.quad 0x0000080000000000, 0x0001080200100020
+	.quad 0x0001000000100000, 0x0000000200000020
+	.quad 0x0801000200100020, 0x0800080200000020
+	.quad 0x0800000200000020, 0x0801080200100020
+	.quad 0x0801080000100000, 0x0800000000000000
+	.quad 0x0800080000000000, 0x0001000000100000
+	.quad 0x0000000200000020, 0x0801000200100020
+	.quad 0x0001080000100000, 0x0001000200100020
+	.quad 0x0800080200000020, 0x0000000000000000
+	.quad 0x0800000000000000, 0x0000080000000000
+	.quad 0x0001080200100020, 0x0801000000100000
+	.quad 0x0001000200100020, 0x0800000200000020
+	.quad 0x0000000000000000, 0x0001080000100000
+	.quad 0x0000080200000020, 0x0801080000100000
+	.quad 0x0801000000100000, 0x0000080200000020
+	.quad 0x0000000000000000, 0x0001080200100020
+	.quad 0x0801000200100020, 0x0001000000100000
+	.quad 0x0800080200000020, 0x0801000000100000
+	.quad 0x0801080000100000, 0x0000080000000000
+	.quad 0x0801000000100000, 0x0800080000000000
+	.quad 0x0000000200000020, 0x0801080200100020
+	.quad 0x0001080200100020, 0x0000000200000020
+	.quad 0x0000080000000000, 0x0800000000000000
+	.quad 0x0000080200000020, 0x0801080000100000
+	.quad 0x0001000000100000, 0x0800000200000020
+	.quad 0x0001000200100020, 0x0800080200000020
+	.quad 0x0800000200000020, 0x0001000200100020
+	.quad 0x0001080000100000, 0x0000000000000000
+	.quad 0x0800080000000000, 0x0000080200000020
+	.quad 0x0800000000000000, 0x0801000200100020
+	.quad 0x0801080200100020, 0x0001080000100000
+.L_s3:
+	.quad 0x0000002000000208, 0x0000202008020200
+	.quad 0x0000000000000000, 0x0000200008020008
+	.quad 0x0000002008000200, 0x0000000000000000
+	.quad 0x0000202000020208, 0x0000002008000200
+	.quad 0x0000200000020008, 0x0000000008000008
+	.quad 0x0000000008000008, 0x0000200000020000
+	.quad 0x0000202008020208, 0x0000200000020008
+	.quad 0x0000200008020000, 0x0000002000000208
+	.quad 0x0000000008000000, 0x0000000000000008
+	.quad 0x0000202008020200, 0x0000002000000200
+	.quad 0x0000202000020200, 0x0000200008020000
+	.quad 0x0000200008020008, 0x0000202000020208
+	.quad 0x0000002008000208, 0x0000202000020200
+	.quad 0x0000200000020000, 0x0000002008000208
+	.quad 0x0000000000000008, 0x0000202008020208
+	.quad 0x0000002000000200, 0x0000000008000000
+	.quad 0x0000202008020200, 0x0000000008000000
+	.quad 0x0000200000020008, 0x0000002000000208
+	.quad 0x0000200000020000, 0x0000202008020200
+	.quad 0x0000002008000200, 0x0000000000000000
+	.quad 0x0000002000000200, 0x0000200000020008
+	.quad 0x0000202008020208, 0x0000002008000200
+	.quad 0x0000000008000008, 0x0000002000000200
+	.quad 0x0000000000000000, 0x0000200008020008
+	.quad 0x0000002008000208, 0x0000200000020000
+	.quad 0x0000000008000000, 0x0000202008020208
+	.quad 0x0000000000000008, 0x0000202000020208
+	.quad 0x0000202000020200, 0x0000000008000008
+	.quad 0x0000200008020000, 0x0000002008000208
+	.quad 0x0000002000000208, 0x0000200008020000
+	.quad 0x0000202000020208, 0x0000000000000008
+	.quad 0x0000200008020008, 0x0000202000020200
+.L_s4:
+	.quad 0x1008020000002001, 0x1000020800002001
+	.quad 0x1000020800002001, 0x0000000800000000
+	.quad 0x0008020800002000, 0x1008000800000001
+	.quad 0x1008000000000001, 0x1000020000002001
+	.quad 0x0000000000000000, 0x0008020000002000
+	.quad 0x0008020000002000, 0x1008020800002001
+	.quad 0x1000000800000001, 0x0000000000000000
+	.quad 0x0008000800000000, 0x1008000000000001
+	.quad 0x1000000000000001, 0x0000020000002000
+	.quad 0x0008000000000000, 0x1008020000002001
+	.quad 0x0000000800000000, 0x0008000000000000
+	.quad 0x1000020000002001, 0x0000020800002000
+	.quad 0x1008000800000001, 0x1000000000000001
+	.quad 0x0000020800002000, 0x0008000800000000
+	.quad 0x0000020000002000, 0x0008020800002000
+	.quad 0x1008020800002001, 0x1000000800000001
+	.quad 0x0008000800000000, 0x1008000000000001
+	.quad 0x0008020000002000, 0x1008020800002001
+	.quad 0x1000000800000001, 0x0000000000000000
+	.quad 0x0000000000000000, 0x0008020000002000
+	.quad 0x0000020800002000, 0x0008000800000000
+	.quad 0x1008000800000001, 0x1000000000000001
+	.quad 0x1008020000002001, 0x1000020800002001
+	.quad 0x1000020800002001, 0x0000000800000000
+	.quad 0x1008020800002001, 0x1000000800000001
+	.quad 0x1000000000000001, 0x0000020000002000
+	.quad 0x1008000000000001, 0x1000020000002001
+	.quad 0x0008020800002000, 0x1008000800000001
+	.quad 0x1000020000002001, 0x0000020800002000
+	.quad 0x0008000000000000, 0x1008020000002001
+	.quad 0x0000000800000000, 0x0008000000000000
+	.quad 0x0000020000002000, 0x0008020800002000
+.L_s5:
+	.quad 0x0000001000000100, 0x0020001002080100
+	.quad 0x0020000002080000, 0x0420001002000100
+	.quad 0x0000000000080000, 0x0000001000000100
+	.quad 0x0400000000000000, 0x0020000002080000
+	.quad 0x0400001000080100, 0x0000000000080000
+	.quad 0x0020001002000100, 0x0400001000080100
+	.quad 0x0420001002000100, 0x0420000002080000
+	.quad 0x0000001000080100, 0x0400000000000000
+	.quad 0x0020000002000000, 0x0400000000080000
+	.quad 0x0400000000080000, 0x0000000000000000
+	.quad 0x0400001000000100, 0x0420001002080100
+	.quad 0x0420001002080100, 0x0020001002000100
+	.quad 0x0420000002080000, 0x0400001000000100
+	.quad 0x0000000000000000, 0x0420000002000000
+	.quad 0x0020001002080100, 0x0020000002000000
+	.quad 0x0420000002000000, 0x0000001000080100
+	.quad 0x0000000000080000, 0x0420001002000100
+	.quad 0x0000001000000100, 0x0020000002000000
+	.quad 0x0400000000000000, 0x0020000002080000
+	.quad 0x0420001002000100, 0x0400001000080100
+	.quad 0x0020001002000100, 0x0400000000000000
+	.quad 0x0420000002080000, 0x0020001002080100
+	.quad 0x0400001000080100, 0x0000001000000100
+	.quad 0x0020000002000000, 0x0420000002080000
+	.quad 0x0420001002080100, 0x0000001000080100
+	.quad 0x0420000002000000, 0x0420001002080100
+	.quad 0x0020000002080000, 0x0000000000000000
+	.quad 0x0400000000080000, 0x0420000002000000
+	.quad 0x0000001000080100, 0x0020001002000100
+	.quad 0x0400001000000100, 0x0000000000080000
+	.quad 0x0000000000000000, 0x0400000000080000
+	.quad 0x0020001002080100, 0x0400001000000100
+.L_s6:
+	.quad 0x0200000120000010, 0x0204000020000000
+	.quad 0x0000040000000000, 0x0204040120000010
+	.quad 0x0204000020000000, 0x0000000100000010
+	.quad 0x0204040120000010, 0x0004000000000000
+	.quad 0x0200040020000000, 0x0004040100000010
+	.quad 0x0004000000000000, 0x0200000120000010
+	.quad 0x0004000100000010, 0x0200040020000000
+	.quad 0x0200000020000000, 0x0000040100000010
+	.quad 0x0000000000000000, 0x0004000100000010
+	.quad 0x0200040120000010, 0x0000040000000000
+	.quad 0x0004040000000000, 0x0200040120000010
+	.quad 0x0000000100000010, 0x0204000120000010
+	.quad 0x0204000120000010, 0x0000000000000000
+	.quad 0x0004040100000010, 0x0204040020000000
+	.quad 0x0000040100000010, 0x0004040000000000
+	.quad 0x0204040020000000, 0x0200000020000000
+	.quad 0x0200040020000000, 0x0000000100000010
+	.quad 0x0204000120000010, 0x0004040000000000
+	.quad 0x0204040120000010, 0x0004000000000000
+	.quad 0x0000040100000010, 0x0200000120000010
+	.quad 0x0004000000000000, 0x0200040020000000
+	.quad 0x0200000020000000, 0x0000040100000010
+	.quad 0x0200000120000010, 0x0204040120000010
+	.quad 0x0004040000000000, 0x0204000020000000
+	.quad 0x0004040100000010, 0x0204040020000000
+	.quad 0x0000000000000000, 0x0204000120000010
+	.quad 0x0000000100000010, 0x0000040000000000
+	.quad 0x0204000020000000, 0x0004040100000010
+	.quad 0x0000040000000000, 0x0004000100000010
+	.quad 0x0200040120000010, 0x0000000000000000
+	.quad 0x0204040020000000, 0x0200000020000000
+	.quad 0x0004000100000010, 0x0200040120000010
+.L_s7:
+	.quad 0x0002000000200000, 0x2002000004200002
+	.quad 0x2000000004000802, 0x0000000000000000
+	.quad 0x0000000000000800, 0x2000000004000802
+	.quad 0x2002000000200802, 0x0002000004200800
+	.quad 0x2002000004200802, 0x0002000000200000
+	.quad 0x0000000000000000, 0x2000000004000002
+	.quad 0x2000000000000002, 0x0000000004000000
+	.quad 0x2002000004200002, 0x2000000000000802
+	.quad 0x0000000004000800, 0x2002000000200802
+	.quad 0x2002000000200002, 0x0000000004000800
+	.quad 0x2000000004000002, 0x0002000004200000
+	.quad 0x0002000004200800, 0x2002000000200002
+	.quad 0x0002000004200000, 0x0000000000000800
+	.quad 0x2000000000000802, 0x2002000004200802
+	.quad 0x0002000000200800, 0x2000000000000002
+	.quad 0x0000000004000000, 0x0002000000200800
+	.quad 0x0000000004000000, 0x0002000000200800
+	.quad 0x0002000000200000, 0x2000000004000802
+	.quad 0x2000000004000802, 0x2002000004200002
+	.quad 0x2002000004200002, 0x2000000000000002
+	.quad 0x2002000000200002, 0x0000000004000000
+	.quad 0x0000000004000800, 0x0002000000200000
+	.quad 0x0002000004200800, 0x2000000000000802
+	.quad 0x2002000000200802, 0x0002000004200800
+	.quad 0x2000000000000802, 0x2000000004000002
+	.quad 0x2002000004200802, 0x0002000004200000
+	.quad 0x0002000000200800, 0x0000000000000000
+	.quad 0x2000000000000002, 0x2002000004200802
+	.quad 0x0000000000000000, 0x2002000000200802
+	.quad 0x0002000004200000, 0x0000000000000800
+	.quad 0x2000000004000002, 0x0000000004000800
+	.quad 0x0000000000000800, 0x2002000000200002
+.L_s8:
+	.quad 0x0100010410001000, 0x0000010000001000
+	.quad 0x0000000000040000, 0x0100010410041000
+	.quad 0x0100000010000000, 0x0100010410001000
+	.quad 0x0000000400000000, 0x0100000010000000
+	.quad 0x0000000400040000, 0x0100000010040000
+	.quad 0x0100010410041000, 0x0000010000041000
+	.quad 0x0100010010041000, 0x0000010400041000
+	.quad 0x0000010000001000, 0x0000000400000000
+	.quad 0x0100000010040000, 0x0100000410000000
+	.quad 0x0100010010001000, 0x0000010400001000
+	.quad 0x0000010000041000, 0x0000000400040000
+	.quad 0x0100000410040000, 0x0100010010041000
+	.quad 0x0000010400001000, 0x0000000000000000
+	.quad 0x0000000000000000, 0x0100000410040000
+	.quad 0x0100000410000000, 0x0100010010001000
+	.quad 0x0000010400041000, 0x0000000000040000
+	.quad 0x0000010400041000, 0x0000000000040000
+	.quad 0x0100010010041000, 0x0000010000001000
+	.quad 0x0000000400000000, 0x0100000410040000
+	.quad 0x0000010000001000, 0x0000010400041000
+	.quad 0x0100010010001000, 0x0000000400000000
+	.quad 0x0100000410000000, 0x0100000010040000
+	.quad 0x0100000410040000, 0x0100000010000000
+	.quad 0x0000000000040000, 0x0100010410001000
+	.quad 0x0000000000000000, 0x0100010410041000
+	.quad 0x0000000400040000, 0x0100000410000000
+	.quad 0x0100000010040000, 0x0100010010001000
+	.quad 0x0100010410001000, 0x0000000000000000
+	.quad 0x0100010410041000, 0x0000010000041000
+	.quad 0x0000010000041000, 0x0000010400001000
+	.quad 0x0000010400001000, 0x0000000400040000
+	.quad 0x0100000010000000, 0x0100010010041000
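
Two C models of the primitives above, to help reading the assembly. First, do_permutation() is the classic three-XOR "delta swap": it exchanges the mask-selected bits of b with the bits of a that sit offset positions higher:

	#include <stdint.h>

	static inline void delta_swap(uint32_t *a, uint32_t *b,
				      unsigned int offset, uint32_t mask)
	{
		uint32_t t = ((*a >> offset) ^ *b) & mask;	/* differing bits */

		*b ^= t;		/* give b its bits from a */
		*a ^= t << offset;	/* and a its bits from b */
	}

Second, each round1/round3 step is eight table lookups: once the 64-bit round key is XORed in, every byte of the working register holds a 6-bit index (the 0x3f3f3f3f3f3f3f3f masking above guarantees this) into one of the s1..s8 tables, whose 64-bit entries already fold in the P permutation. A simplified model; the asm consumes the bytes in the order s8, s6, s4, s2, s7, s5, s3, s1, and sbox[] here is assumed ordered the same way:

	static uint64_t round_model(uint64_t to, uint64_t from,
				    uint64_t round_key,
				    const uint64_t sbox[8][64])
	{
		uint64_t w = from ^ round_key;

		for (int i = 0; i < 8; i++)
			to ^= sbox[i][(w >> (8 * i)) & 0x3f];
		return to;
	}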
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
new file mode 100644
index 0000000..0e9c066
--- /dev/null
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -0,0 +1,509 @@
+/*
+ * Glue Code for assembler optimized version of 3DES
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
+ *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * CTR part based on code (crypto/ctr.c) by:
+ *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/processor.h>
+#include <crypto/des.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+
+struct des3_ede_x86_ctx {
+	u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
+	u32 dec_expkey[DES3_EDE_EXPKEY_WORDS];
+};
+
+/* regular block cipher functions */
+asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
+					  const u8 *src);
+
+/* 3-way parallel cipher functions */
+asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
+					       const u8 *src);
+
+static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
+				    const u8 *src)
+{
+	u32 *enc_ctx = ctx->enc_expkey;
+
+	des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
+}
+
+static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
+				    const u8 *src)
+{
+	u32 *dec_ctx = ctx->dec_expkey;
+
+	des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
+}
+
+static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
+					 const u8 *src)
+{
+	u32 *enc_ctx = ctx->enc_expkey;
+
+	des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src);
+}
+
+static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
+					 const u8 *src)
+{
+	u32 *dec_ctx = ctx->dec_expkey;
+
+	des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
+}
+
+static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
+}
+
+static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
+}
+
+static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+		     const u32 *expkey)
+{
+	unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+	unsigned int nbytes;
+	int err;
+
+	err = blkcipher_walk_virt(desc, walk);
+
+	while ((nbytes = walk->nbytes)) {
+		u8 *wsrc = walk->src.virt.addr;
+		u8 *wdst = walk->dst.virt.addr;
+
+		/* Process three-block batches */
+		if (nbytes >= bsize * 3) {
+			do {
+				des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
+							       wsrc);
+
+				wsrc += bsize * 3;
+				wdst += bsize * 3;
+				nbytes -= bsize * 3;
+			} while (nbytes >= bsize * 3);
+
+			if (nbytes < bsize)
+				goto done;
+		}
+
+		/* Handle leftovers */
+		do {
+			des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
+
+			wsrc += bsize;
+			wdst += bsize;
+			nbytes -= bsize;
+		} while (nbytes >= bsize);
+
+done:
+		err = blkcipher_walk_done(desc, walk, nbytes);
+	}
+
+	return err;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_crypt(desc, &walk, ctx->enc_expkey);
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_crypt(desc, &walk, ctx->dec_expkey);
+}
+
+static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
+				  struct blkcipher_walk *walk)
+{
+	struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+	unsigned int nbytes = walk->nbytes;
+	u64 *src = (u64 *)walk->src.virt.addr;
+	u64 *dst = (u64 *)walk->dst.virt.addr;
+	u64 *iv = (u64 *)walk->iv;
+
+	do {
+		*dst = *src ^ *iv;
+		des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
+		iv = dst;
+
+		src += 1;
+		dst += 1;
+		nbytes -= bsize;
+	} while (nbytes >= bsize);
+
+	*(u64 *)walk->iv = *iv;
+	return nbytes;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		nbytes = __cbc_encrypt(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+				  struct blkcipher_walk *walk)
+{
+	struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+	unsigned int nbytes = walk->nbytes;
+	u64 *src = (u64 *)walk->src.virt.addr;
+	u64 *dst = (u64 *)walk->dst.virt.addr;
+	u64 ivs[3 - 1];
+	u64 last_iv;
+
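+	/*
+	 * Walk from the last block towards the first so that in-place
+	 * decryption (dst == src) never overwrites a ciphertext block that
+	 * is still needed as the previous block's chaining value; last_iv
+	 * preserves the final ciphertext block as the IV for the next
+	 * walk chunk.
+	 */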
+	/* Start of the last block. */
+	src += nbytes / bsize - 1;
+	dst += nbytes / bsize - 1;
+
+	last_iv = *src;
+
+	/* Process three-block batches */
+	if (nbytes >= bsize * 3) {
+		do {
+			nbytes -= bsize * 3 - bsize;
+			src -= 3 - 1;
+			dst -= 3 - 1;
+
+			ivs[0] = src[0];
+			ivs[1] = src[1];
+
+			des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
+
+			dst[1] ^= ivs[0];
+			dst[2] ^= ivs[1];
+
+			nbytes -= bsize;
+			if (nbytes < bsize)
+				goto done;
+
+			*dst ^= *(src - 1);
+			src -= 1;
+			dst -= 1;
+		} while (nbytes >= bsize * 3);
+	}
+
+	/* Handle leftovers */
+	for (;;) {
+		des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
+
+		nbytes -= bsize;
+		if (nbytes < bsize)
+			break;
+
+		*dst ^= *(src - 1);
+		src -= 1;
+		dst -= 1;
+	}
+
+done:
+	*dst ^= *(u64 *)walk->iv;
+	*(u64 *)walk->iv = last_iv;
+
+	return nbytes;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		nbytes = __cbc_decrypt(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
+			    struct blkcipher_walk *walk)
+{
+	u8 *ctrblk = walk->iv;
+	u8 keystream[DES3_EDE_BLOCK_SIZE];
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
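+	/*
+	 * Final partial block: encrypt the counter once and XOR only the
+	 * remaining nbytes (< DES3_EDE_BLOCK_SIZE) of keystream into dst.
+	 */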
+	des3_ede_enc_blk(ctx, keystream, ctrblk);
+	crypto_xor(keystream, src, nbytes);
+	memcpy(dst, keystream, nbytes);
+
+	crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
+}
+
+static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+				struct blkcipher_walk *walk)
+{
+	struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+	unsigned int nbytes = walk->nbytes;
+	__be64 *src = (__be64 *)walk->src.virt.addr;
+	__be64 *dst = (__be64 *)walk->dst.virt.addr;
+	u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
+	__be64 ctrblocks[3];
+
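+	/*
+	 * The counter is kept as a host-endian u64 (ctrblk) so it can be
+	 * incremented cheaply; each generated block is converted back to
+	 * big-endian with cpu_to_be64() just before encryption.
+	 */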
+	/* Process three-block batches */
+	if (nbytes >= bsize * 3) {
+		do {
+			/* create ctrblks for parallel encrypt */
+			ctrblocks[0] = cpu_to_be64(ctrblk++);
+			ctrblocks[1] = cpu_to_be64(ctrblk++);
+			ctrblocks[2] = cpu_to_be64(ctrblk++);
+
+			des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks,
+					      (u8 *)ctrblocks);
+
+			dst[0] = src[0] ^ ctrblocks[0];
+			dst[1] = src[1] ^ ctrblocks[1];
+			dst[2] = src[2] ^ ctrblocks[2];
+
+			src += 3;
+			dst += 3;
+		} while ((nbytes -= bsize * 3) >= bsize * 3);
+
+		if (nbytes < bsize)
+			goto done;
+	}
+
+	/* Handle leftovers */
+	do {
+		ctrblocks[0] = cpu_to_be64(ctrblk++);
+
+		des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
+
+		dst[0] = src[0] ^ ctrblocks[0];
+
+		src += 1;
+		dst += 1;
+	} while ((nbytes -= bsize) >= bsize);
+
+done:
+	*(__be64 *)walk->iv = cpu_to_be64(ctrblk);
+	return nbytes;
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		     struct scatterlist *src, unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
+
+	while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
+		nbytes = __ctr_crypt(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	if (walk.nbytes) {
+		ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+
+	return err;
+}
+
+static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
+			       unsigned int keylen)
+{
+	struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 i, j, tmp;
+	int err;
+
+	/* Generate encryption context using generic implementation. */
+	err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen);
+	if (err < 0)
+		return err;
+
+	/*
+	 * Fix the encryption context for this implementation (the round
+	 * macros expect the odd key words pre-rotated by 4 bits to match
+	 * their internal data layout) and form the decryption context by
+	 * reversing the per-round key order.
+	 */
+	j = DES3_EDE_EXPKEY_WORDS - 2;
+	for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
+		tmp = ror32(ctx->enc_expkey[i + 1], 4);
+		ctx->enc_expkey[i + 1] = tmp;
+
+		ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0];
+		ctx->dec_expkey[j + 1] = tmp;
+	}
+
+	return 0;
+}
+
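+/* Plain cipher plus ecb/cbc/ctr blkcipher modes, all using the asm core. */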
+static struct crypto_alg des3_ede_algs[4] = { {
+	.cra_name		= "des3_ede",
+	.cra_driver_name	= "des3_ede-asm",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct des3_ede_x86_ctx),
+	.cra_alignmask		= 0,
+	.cra_module		= THIS_MODULE,
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize	= DES3_EDE_KEY_SIZE,
+			.cia_max_keysize	= DES3_EDE_KEY_SIZE,
+			.cia_setkey		= des3_ede_x86_setkey,
+			.cia_encrypt		= des3_ede_x86_encrypt,
+			.cia_decrypt		= des3_ede_x86_decrypt,
+		}
+	}
+}, {
+	.cra_name		= "ecb(des3_ede)",
+	.cra_driver_name	= "ecb-des3_ede-asm",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct des3_ede_x86_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= des3_ede_x86_setkey,
+			.encrypt	= ecb_encrypt,
+			.decrypt	= ecb_decrypt,
+		},
+	},
+}, {
+	.cra_name		= "cbc(des3_ede)",
+	.cra_driver_name	= "cbc-des3_ede-asm",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct des3_ede_x86_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
+			.setkey		= des3_ede_x86_setkey,
+			.encrypt	= cbc_encrypt,
+			.decrypt	= cbc_decrypt,
+		},
+	},
+}, {
+	.cra_name		= "ctr(des3_ede)",
+	.cra_driver_name	= "ctr-des3_ede-asm",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct des3_ede_x86_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
+			.setkey		= des3_ede_x86_setkey,
+			.encrypt	= ctr_crypt,
+			.decrypt	= ctr_crypt,
+		},
+	},
+} };
+
+static bool is_blacklisted_cpu(void)
+{
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return false;
+
+	if (boot_cpu_data.x86 == 0x0f) {
+		/*
+		 * On Pentium 4, des3_ede-x86_64 is slower than the generic C
+		 * implementation because it uses 64-bit rotates, which are
+		 * really slow on P4. Therefore, blacklist P4s.
+		 */
+		return true;
+	}
+
+	return false;
+}
+
+static int force;
+module_param(force, int, 0);
+MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
+
+static int __init des3_ede_x86_init(void)
+{
+	if (!force && is_blacklisted_cpu()) {
+		pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
+		return -ENODEV;
+	}
+
+	return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+}
+
+static void __exit des3_ede_x86_fini(void)
+{
+	crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+}
+
+module_init(des3_ede_x86_init);
+module_exit(des3_ede_x86_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
+MODULE_ALIAS("des3_ede");
+MODULE_ALIAS("des3_ede-asm");
+MODULE_ALIAS("des");
+MODULE_ALIAS("des-asm");
+MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 5c7198c..0f4460b 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -99,7 +99,7 @@
 #if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
- * For either of these options x86 doesn't have a strong TSO memory
+ * For this option x86 doesn't have a strong TSO memory
  * model and we should fall back to full barriers.
  */
 
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 0525a8b..e1f7fec 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -68,6 +68,8 @@
 
 int ftrace_int3_handler(struct pt_regs *regs);
 
+#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
+
 #endif /*  CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf8..0a8b519 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */
 
-#define INTERRUPT_RETURN	iretq
+#define INTERRUPT_RETURN	jmp native_iret
 #define USERGS_SYSRET64				\
 	swapgs;					\
 	sysretq;
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index a04fe4e..eb18117 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -37,6 +37,7 @@
 	u8  modrm_reg;          /* index of register used               */
 	u8  modrm_rm;		/* rm part of modrm			*/
 	u64 src_val;            /* value of source operand              */
+	u64 dst_val;            /* value of destination operand         */
 	u8  src_bytes;          /* size of source operand               */
 	u8  dst_bytes;          /* size of destination operand          */
 	u8  ad_bytes;           /* size of src/dst address              */
@@ -194,6 +195,7 @@
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
 	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
 	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+	int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
 	int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
 	void (*halt)(struct x86_emulate_ctxt *ctxt);
 	void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
@@ -231,7 +233,7 @@
 	union {
 		unsigned long val;
 		u64 val64;
-		char valptr[sizeof(unsigned long) + 2];
+		char valptr[sizeof(sse128_t)];
 		sse128_t vec_val;
 		u64 mm_val;
 		void *data;
@@ -240,8 +242,8 @@
 
 struct fetch_cache {
 	u8 data[15];
-	unsigned long start;
-	unsigned long end;
+	u8 *ptr;
+	u8 *end;
 };
 
 struct read_cache {
@@ -286,30 +288,36 @@
 	u8 opcode_len;
 	u8 b;
 	u8 intercept;
-	u8 lock_prefix;
-	u8 rep_prefix;
 	u8 op_bytes;
 	u8 ad_bytes;
-	u8 rex_prefix;
 	struct operand src;
 	struct operand src2;
 	struct operand dst;
-	bool has_seg_override;
-	u8 seg_override;
-	u64 d;
 	int (*execute)(struct x86_emulate_ctxt *ctxt);
 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
+	/*
+	 * The following six fields are cleared together;
+	 * the rest are initialized unconditionally in x86_decode_insn
+	 * or elsewhere.
+	 */
+	bool rip_relative;
+	u8 rex_prefix;
+	u8 lock_prefix;
+	u8 rep_prefix;
+	/* bitmaps of registers in _regs[] that can be read */
+	u32 regs_valid;
+	/* bitmaps of registers in _regs[] that have been written */
+	u32 regs_dirty;
 	/* modrm */
 	u8 modrm;
 	u8 modrm_mod;
 	u8 modrm_reg;
 	u8 modrm_rm;
 	u8 modrm_seg;
-	bool rip_relative;
+	u8 seg_override;
+	u64 d;
 	unsigned long _eip;
 	struct operand memop;
-	u32 regs_valid;  /* bitmaps of registers in _regs[] that can be read */
-	u32 regs_dirty;  /* bitmaps of registers in _regs[] that have been written */
 	/* Fields above regs are cleared together. */
 	unsigned long _regs[NR_VCPU_REGS];
 	struct operand *memopp;
@@ -407,6 +415,7 @@
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
 #define EMULATION_INTERCEPTED 2
+void init_decode_cache(struct x86_emulate_ctxt *ctxt);
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 			 u16 tss_selector, int idt_index, int reason,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49205d0..5724601 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -152,14 +152,16 @@
 
 #define DR6_BD		(1 << 13)
 #define DR6_BS		(1 << 14)
-#define DR6_FIXED_1	0xffff0ff0
-#define DR6_VOLATILE	0x0000e00f
+#define DR6_RTM		(1 << 16)
+#define DR6_FIXED_1	0xfffe0ff0
+#define DR6_INIT	0xffff0ff0
+#define DR6_VOLATILE	0x0001e00f
 
 #define DR7_BP_EN_MASK	0x000000ff
 #define DR7_GE		(1 << 9)
 #define DR7_GD		(1 << 13)
 #define DR7_FIXED_1	0x00000400
-#define DR7_VOLATILE	0xffff23ff
+#define DR7_VOLATILE	0xffff2bff
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC	0
@@ -448,7 +450,7 @@
 	u64 tsc_offset_adjustment;
 	u64 this_tsc_nsec;
 	u64 this_tsc_write;
-	u8  this_tsc_generation;
+	u64 this_tsc_generation;
 	bool tsc_catchup;
 	bool tsc_always_catchup;
 	s8 virtual_tsc_shift;
@@ -591,7 +593,7 @@
 	u64 cur_tsc_nsec;
 	u64 cur_tsc_write;
 	u64 cur_tsc_offset;
-	u8  cur_tsc_generation;
+	u64 cur_tsc_generation;
 	int nr_vcpus_matched_tsc;
 
 	spinlock_t pvclock_gtod_sync_lock;
@@ -717,7 +719,7 @@
 	int (*handle_exit)(struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
-	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
 				unsigned char *hypercall_addr);
 	void (*set_irq)(struct kvm_vcpu *vcpu);
@@ -1070,6 +1072,7 @@
 bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 851bcdc..fd47218 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -52,10 +52,9 @@
  * Compared to the generic __my_cpu_offset version, the following
  * saves one instruction and avoids clobbering a temp register.
  */
-#define raw_cpu_ptr(ptr)				\
+#define arch_raw_cpu_ptr(ptr)				\
 ({							\
 	unsigned long tcp_ptr__;			\
-	__verify_pcpu_ptr(ptr);				\
 	asm volatile("add " __percpu_arg(1) ", %0"	\
 		     : "=r" (tcp_ptr__)			\
 		     : "m" (this_cpu_off), "0" (ptr));	\
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a4ea023..32cc237 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -696,6 +696,8 @@
 	rep_nop();
 }
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index 70f46f0..ae0e241 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -3,7 +3,7 @@
 
 #include <asm-generic/qrwlock_types.h>
 
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+#ifndef CONFIG_X86_PPRO_FENCE
 #define queue_write_unlock queue_write_unlock
 static inline void queue_write_unlock(struct qrwlock *lock)
 {
diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h
index 44282fb..c4b9dc2 100644
--- a/arch/x86/include/asm/vga.h
+++ b/arch/x86/include/asm/vga.h
@@ -17,10 +17,4 @@
 #define vga_readb(x) (*(x))
 #define vga_writeb(x, y) (*(y) = (x))
 
-#ifdef CONFIG_FB_EFI
-#define __ARCH_HAS_VGA_DEFAULT_DEVICE
-extern struct pci_dev *vga_default_device(void);
-extern void vga_set_default_device(struct pci_dev *pdev);
-#endif
-
 #endif /* _ASM_X86_VGA_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 7004d21..bcbfade 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -51,6 +51,9 @@
 #define CPU_BASED_MONITOR_EXITING               0x20000000
 #define CPU_BASED_PAUSE_EXITING                 0x40000000
 #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   0x80000000
+
+#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x0401e172
+
 /*
  * Definitions of Secondary Processor-Based VM-Execution Controls.
  */
@@ -76,7 +79,7 @@
 
 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x00000016
 
-#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000002
+#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000004
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
 #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
@@ -89,7 +92,7 @@
 
 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff
 
-#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000002
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000004
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 09409c4..3dec769 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -22,6 +22,7 @@
 header-y += ist.h
 header-y += kvm.h
 header-y += kvm_para.h
+header-y += kvm_perf.h
 header-y += ldt.h
 header-y += mce.h
 header-y += mman.h
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index d3a8778..d7dcef5 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -23,7 +23,10 @@
 #define GP_VECTOR 13
 #define PF_VECTOR 14
 #define MF_VECTOR 16
+#define AC_VECTOR 17
 #define MC_VECTOR 18
+#define XM_VECTOR 19
+#define VE_VECTOR 20
 
 /* Select x86 specific features in <linux/kvm.h> */
 #define __KVM_HAVE_PIT
diff --git a/arch/x86/include/uapi/asm/kvm_perf.h b/arch/x86/include/uapi/asm/kvm_perf.h
new file mode 100644
index 0000000..3bb964f
--- /dev/null
+++ b/arch/x86/include/uapi/asm/kvm_perf.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_KVM_PERF_H
+#define _ASM_X86_KVM_PERF_H
+
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
+
+#define DECODE_STR_LEN 20
+
+#define VCPU_ID "vcpu_id"
+
+#define KVM_ENTRY_TRACE "kvm:kvm_entry"
+#define KVM_EXIT_TRACE "kvm:kvm_exit"
+#define KVM_EXIT_REASON "exit_reason"
+
+#endif /* _ASM_X86_KVM_PERF_H */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index fcf2b3a..eaefcc6 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -558,6 +558,7 @@
 
 /* VMX_BASIC bits and bitmasks */
 #define VMX_BASIC_VMCS_SIZE_SHIFT	32
+#define VMX_BASIC_TRUE_CTLS		(1ULL << 55)
 #define VMX_BASIC_64		0x0001000000000000LLU
 #define VMX_BASIC_MEM_TYPE_SHIFT	50
 #define VMX_BASIC_MEM_TYPE_MASK	0x003c000000000000LLU
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 3bbdf4c..30790d7 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -294,31 +294,41 @@
 			cpu_to_node(cpu));
 }
 
-static void amd_uncore_cpu_up_prepare(unsigned int cpu)
+static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
-	struct amd_uncore *uncore;
+	struct amd_uncore *uncore_nb = NULL, *uncore_l2;
 
 	if (amd_uncore_nb) {
-		uncore = amd_uncore_alloc(cpu);
-		uncore->cpu = cpu;
-		uncore->num_counters = NUM_COUNTERS_NB;
-		uncore->rdpmc_base = RDPMC_BASE_NB;
-		uncore->msr_base = MSR_F15H_NB_PERF_CTL;
-		uncore->active_mask = &amd_nb_active_mask;
-		uncore->pmu = &amd_nb_pmu;
-		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
+		uncore_nb = amd_uncore_alloc(cpu);
+		if (!uncore_nb)
+			goto fail;
+		uncore_nb->cpu = cpu;
+		uncore_nb->num_counters = NUM_COUNTERS_NB;
+		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
+		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
+		uncore_nb->active_mask = &amd_nb_active_mask;
+		uncore_nb->pmu = &amd_nb_pmu;
+		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
 	}
 
 	if (amd_uncore_l2) {
-		uncore = amd_uncore_alloc(cpu);
-		uncore->cpu = cpu;
-		uncore->num_counters = NUM_COUNTERS_L2;
-		uncore->rdpmc_base = RDPMC_BASE_L2;
-		uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
-		uncore->active_mask = &amd_l2_active_mask;
-		uncore->pmu = &amd_l2_pmu;
-		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+		uncore_l2 = amd_uncore_alloc(cpu);
+		if (!uncore_l2)
+			goto fail;
+		uncore_l2->cpu = cpu;
+		uncore_l2->num_counters = NUM_COUNTERS_L2;
+		uncore_l2->rdpmc_base = RDPMC_BASE_L2;
+		uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
+		uncore_l2->active_mask = &amd_l2_active_mask;
+		uncore_l2->pmu = &amd_l2_pmu;
+		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
 	}
+
+	return 0;
+
+fail:
+	kfree(uncore_nb);
+	return -ENOMEM;
 }
 
 static struct amd_uncore *
@@ -441,7 +451,7 @@
 
 	if (!--uncore->refcnt)
 		kfree(uncore);
-	*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
+	*per_cpu_ptr(uncores, cpu) = NULL;
 }
 
 static void amd_uncore_cpu_dead(unsigned int cpu)
@@ -461,7 +471,8 @@
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-		amd_uncore_cpu_up_prepare(cpu);
+		if (amd_uncore_cpu_up_prepare(cpu))
+			return notifier_from_errno(-ENOMEM);
 		break;
 
 	case CPU_STARTING:
@@ -501,20 +512,33 @@
 	amd_uncore_cpu_online(cpu);
 }
 
+static void cleanup_cpu_online(void *dummy)
+{
+	unsigned int cpu = smp_processor_id();
+
+	amd_uncore_cpu_dead(cpu);
+}
+
 static int __init amd_uncore_init(void)
 {
-	unsigned int cpu;
+	unsigned int cpu, cpu2;
 	int ret = -ENODEV;
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
-		return -ENODEV;
+		goto fail_nodev;
 
 	if (!cpu_has_topoext)
-		return -ENODEV;
+		goto fail_nodev;
 
 	if (cpu_has_perfctr_nb) {
 		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
-		perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+		if (!amd_uncore_nb) {
+			ret = -ENOMEM;
+			goto fail_nb;
+		}
+		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+		if (ret)
+			goto fail_nb;
 
 		printk(KERN_INFO "perf: AMD NB counters detected\n");
 		ret = 0;
@@ -522,20 +546,28 @@
 
 	if (cpu_has_perfctr_l2) {
 		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
-		perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+		if (!amd_uncore_l2) {
+			ret = -ENOMEM;
+			goto fail_l2;
+		}
+		ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+		if (ret)
+			goto fail_l2;
 
 		printk(KERN_INFO "perf: AMD L2I counters detected\n");
 		ret = 0;
 	}
 
 	if (ret)
-		return -ENODEV;
+		goto fail_nodev;
 
 	cpu_notifier_register_begin();
 
 	/* init cpus already online before registering for hotplug notifier */
 	for_each_online_cpu(cpu) {
-		amd_uncore_cpu_up_prepare(cpu);
+		ret = amd_uncore_cpu_up_prepare(cpu);
+		if (ret)
+			goto fail_online;
 		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
 	}
 
@@ -543,5 +575,30 @@
 	cpu_notifier_register_done();
 
 	return 0;
+
+fail_online:
+	for_each_online_cpu(cpu2) {
+		if (cpu2 == cpu)
+			break;
+		smp_call_function_single(cpu2, cleanup_cpu_online, NULL, 1);
+	}
+	cpu_notifier_register_done();
+
+	/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
+	amd_uncore_nb = amd_uncore_l2 = NULL;
+	if (cpu_has_perfctr_l2)
+		perf_pmu_unregister(&amd_l2_pmu);
+fail_l2:
+	if (cpu_has_perfctr_nb)
+		perf_pmu_unregister(&amd_nb_pmu);
+	if (amd_uncore_l2)
+		free_percpu(amd_uncore_l2);
+fail_nb:
+	if (amd_uncore_nb)
+		free_percpu(amd_uncore_nb);
+
+fail_nodev:
+	return ret;
 }
 device_initcall(amd_uncore_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index ae6552a..cfc6f9d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2947,10 +2947,7 @@
 		 * extra registers. If we failed to take an extra
 		 * register, try the alternative.
 		 */
-		if (idx % 2)
-			idx--;
-		else
-			idx++;
+		idx ^= 1;
 		if (idx != reg1->idx % 6) {
 			if (idx == 2)
 				config1 >>= 8;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 0d0c9d4..47c410d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1059,9 +1059,6 @@
 END(mcount)
 
 ENTRY(ftrace_caller)
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1093,8 +1090,6 @@
 
 ENTRY(ftrace_regs_caller)
 	pushf	/* push flags before compare (in cs location) */
-	cmpl $0, function_trace_stop
-	jne ftrace_restore_flags
 
 	/*
 	 * i386 does not save SS and ESP when coming from kernel.
@@ -1153,7 +1148,6 @@
 	popf			/* Pop flags at end (no addl to corrupt flags) */
 	jmp ftrace_ret
 
-ftrace_restore_flags:
 	popf
 	jmp  ftrace_stub
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -1162,9 +1156,6 @@
 	cmpl $__PAGE_OFFSET, %esp
 	jb ftrace_stub		/* Paging not enabled yet? */
 
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b25ca96..c844f08 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -830,27 +830,24 @@
 	RESTORE_ARGS 1,8,1
 
 irq_return:
+	INTERRUPT_RETURN
+
+ENTRY(native_iret)
 	/*
 	 * Are we returning to a stack segment from the LDT?  Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
 	 */
 #ifdef CONFIG_X86_ESPFIX64
 	testb $4,(SS-RIP)(%rsp)
-	jnz irq_return_ldt
+	jnz native_irq_return_ldt
 #endif
 
-irq_return_iret:
-	INTERRUPT_RETURN
-	_ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
 	iretq
-	_ASM_EXTABLE(native_iret, bad_iret)
-#endif
+	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
 	pushq_cfi %rax
 	pushq_cfi %rdi
 	SWAPGS
@@ -872,7 +869,7 @@
 	SWAPGS
 	movq %rax,%rsp
 	popq_cfi %rax
-	jmp irq_return_iret
+	jmp native_irq_return_iret
 #endif
 
 	.section .fixup,"ax"
@@ -956,13 +953,8 @@
 	cmpl $__KERNEL_CS,CS(%rdi)
 	jne do_double_fault
 	movq RIP(%rdi),%rax
-	cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
-	je 1f
-	cmpq $native_iret,%rax
-#endif
+	cmpq $native_irq_return_iret,%rax
 	jne do_double_fault		/* This shouldn't happen... */
-1:
 	movq PER_CPU_VAR(kernel_stack),%rax
 	subq $(6*8-KERNEL_STACK_OFFSET),%rax	/* Reset to original stack */
 	movq %rax,RSP(%rdi)
@@ -1428,7 +1420,7 @@
  */
 error_kernelspace:
 	incl %ebx
-	leaq irq_return_iret(%rip),%rcx
+	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
 	je error_swapgs
 	movl %ecx,%eax	/* zero extend */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cbc4a91..3386dc9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -703,6 +703,9 @@
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index c050a01..c73aecf 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -46,10 +46,6 @@
 .endm
 
 ENTRY(ftrace_caller)
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	ftrace_caller_setup
 	/* regs go into 4th parameter (but make it NULL) */
 	movq $0, %rcx
@@ -73,10 +69,6 @@
 	/* Save the current flags before compare (in SS location)*/
 	pushfq
 
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne  ftrace_restore_flags
-
 	/* skip=8 to skip flags saved in SS */
 	ftrace_caller_setup 8
 
@@ -131,7 +123,7 @@
 	popfq
 
 	jmp ftrace_return
-ftrace_restore_flags:
+
 	popfq
 	jmp  ftrace_stub
 
@@ -141,9 +133,6 @@
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
 
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 3f08f34..a1da673 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -6,7 +6,6 @@
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, iret);
 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
index 2a26819..80eab01 100644
--- a/arch/x86/kernel/resource.c
+++ b/arch/x86/kernel/resource.c
@@ -37,10 +37,12 @@
 
 void arch_remove_reservations(struct resource *avail)
 {
-	/* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+	/*
+	 * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
+	 * the low 1MB unconditionally, as this area is needed for some ISA
+	 * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
+	 */
 	if (avail->flags & IORESOURCE_MEM) {
-		if (avail->start < BIOS_END)
-			avail->start = BIOS_END;
 		resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
 
 		remove_e820_regions(avail);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f908731..a538059 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -95,4 +95,12 @@
 	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
 }
+
+static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_RTM));
+}
 #endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e4e833d..56657b0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -162,6 +162,10 @@
 #define NoWrite     ((u64)1 << 45)  /* No writeback */
 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 #define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
+#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
+#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
+#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
+#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
 
 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
 
@@ -426,6 +430,7 @@
 		.modrm_reg  = ctxt->modrm_reg,
 		.modrm_rm   = ctxt->modrm_rm,
 		.src_val    = ctxt->src.val64,
+		.dst_val    = ctxt->dst.val64,
 		.src_bytes  = ctxt->src.bytes,
 		.dst_bytes  = ctxt->dst.bytes,
 		.ad_bytes   = ctxt->ad_bytes,
@@ -511,12 +516,6 @@
 	return desc->g ? (limit << 12) | 0xfff : limit;
 }
 
-static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
-{
-	ctxt->has_seg_override = true;
-	ctxt->seg_override = seg;
-}
-
 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 {
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
@@ -525,14 +524,6 @@
 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 }
 
-static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
-{
-	if (!ctxt->has_seg_override)
-		return 0;
-
-	return ctxt->seg_override;
-}
-
 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 			     u32 error, bool valid)
 {
@@ -651,7 +642,12 @@
 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 			goto bad;
 		lim = desc_limit_scaled(&desc);
-		if ((desc.type & 8) || !(desc.type & 4)) {
+		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
+		    (ctxt->d & NoBigReal)) {
+			/* la is between zero and 0xffff */
+			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+				goto bad;
+		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 				goto bad;
@@ -716,68 +712,71 @@
 }
 
 /*
- * Fetch the next byte of the instruction being emulated which is pointed to
- * by ctxt->_eip, then increment ctxt->_eip.
- *
- * Also prefetch the remaining bytes of the instruction without crossing page
+ * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
  */
-static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
+static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
-	struct fetch_cache *fc = &ctxt->fetch;
 	int rc;
-	int size, cur_size;
+	unsigned size;
+	unsigned long linear;
+	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
+	struct segmented_address addr = { .seg = VCPU_SREG_CS,
+					   .ea = ctxt->eip + cur_size };
 
-	if (ctxt->_eip == fc->end) {
-		unsigned long linear;
-		struct segmented_address addr = { .seg = VCPU_SREG_CS,
-						  .ea  = ctxt->_eip };
-		cur_size = fc->end - fc->start;
-		size = min(15UL - cur_size,
-			   PAGE_SIZE - offset_in_page(ctxt->_eip));
-		rc = __linearize(ctxt, addr, size, false, true, &linear);
-		if (unlikely(rc != X86EMUL_CONTINUE))
-			return rc;
-		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
-				      size, &ctxt->exception);
-		if (unlikely(rc != X86EMUL_CONTINUE))
-			return rc;
-		fc->end += size;
-	}
-	*dest = fc->data[ctxt->_eip - fc->start];
-	ctxt->_eip++;
+	size = 15UL ^ cur_size;
+	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	if (unlikely(rc != X86EMUL_CONTINUE))
+		return rc;
+
+	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
+
+	/*
+	 * One instruction can only straddle two pages,
+	 * and one has been loaded at the beginning of
+	 * x86_decode_insn.  So, if we still do not have
+	 * enough bytes, we must have hit the 15-byte boundary.
+	 */
+	if (unlikely(size < op_size))
+		return X86EMUL_UNHANDLEABLE;
+	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
+			      size, &ctxt->exception);
+	if (unlikely(rc != X86EMUL_CONTINUE))
+		return rc;
+	ctxt->fetch.end += size;
 	return X86EMUL_CONTINUE;
 }
 
-static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
-			 void *dest, unsigned size)
+static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
+					       unsigned size)
 {
-	int rc;
-
-	/* x86 instructions are limited to 15 bytes. */
-	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
-		return X86EMUL_UNHANDLEABLE;
-	while (size--) {
-		rc = do_insn_fetch_byte(ctxt, dest++);
-		if (rc != X86EMUL_CONTINUE)
-			return rc;
-	}
-	return X86EMUL_CONTINUE;
+	if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
+		return __do_insn_fetch_bytes(ctxt, size);
+	else
+		return X86EMUL_CONTINUE;
 }
 
 /* Fetch next part of the instruction being emulated. */
 #define insn_fetch(_type, _ctxt)					\
-({	unsigned long _x;						\
-	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
+({	_type _x;							\
+									\
+	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
 	if (rc != X86EMUL_CONTINUE)					\
 		goto done;						\
-	(_type)_x;							\
+	ctxt->_eip += sizeof(_type);					\
+	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
+	ctxt->fetch.ptr += sizeof(_type);				\
+	_x;								\
 })
 
 #define insn_fetch_arr(_arr, _size, _ctxt)				\
-({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
+({									\
+	rc = do_insn_fetch_bytes(_ctxt, _size);				\
 	if (rc != X86EMUL_CONTINUE)					\
 		goto done;						\
+	ctxt->_eip += (_size);						\
+	memcpy(_arr, ctxt->fetch.ptr, _size);				\
+	ctxt->fetch.ptr += (_size);					\
 })
 
 /*
@@ -1063,19 +1062,17 @@
 			struct operand *op)
 {
 	u8 sib;
-	int index_reg = 0, base_reg = 0, scale;
+	int index_reg, base_reg, scale;
 	int rc = X86EMUL_CONTINUE;
 	ulong modrm_ea = 0;
 
-	if (ctxt->rex_prefix) {
-		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
-		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
-		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
-	}
+	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
+	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
+	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
 
-	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
+	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
-	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
+	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
 	ctxt->modrm_seg = VCPU_SREG_DS;
 
 	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
@@ -1093,7 +1090,7 @@
 		if (ctxt->d & Mmx) {
 			op->type = OP_MM;
 			op->bytes = 8;
-			op->addr.xmm = ctxt->modrm_rm & 7;
+			op->addr.mm = ctxt->modrm_rm & 7;
 			return rc;
 		}
 		fetch_register_operand(op);
@@ -1190,6 +1187,9 @@
 		}
 	}
 	op->addr.mem.ea = modrm_ea;
+	if (ctxt->ad_bytes != 8)
+		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
+
 done:
 	return rc;
 }
@@ -1220,12 +1220,14 @@
 	long sv = 0, mask;
 
 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
-		mask = ~(ctxt->dst.bytes * 8 - 1);
+		mask = ~((long)ctxt->dst.bytes * 8 - 1);
 
 		if (ctxt->src.bytes == 2)
 			sv = (s16)ctxt->src.val & (s16)mask;
 		else if (ctxt->src.bytes == 4)
 			sv = (s32)ctxt->src.val & (s32)mask;
+		else
+			sv = (s64)ctxt->src.val & (s64)mask;
 
 		ctxt->dst.addr.mem.ea += (sv >> 3);
 	}
@@ -1315,8 +1317,7 @@
 		in_page = (ctxt->eflags & EFLG_DF) ?
 			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
 			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
-		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
-			count);
+		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
 		if (n == 0)
 			n = 1;
 		rc->pos = rc->end = 0;
@@ -1358,17 +1359,19 @@
 				     u16 selector, struct desc_ptr *dt)
 {
 	const struct x86_emulate_ops *ops = ctxt->ops;
+	u32 base3 = 0;
 
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
 		u16 sel;
 
 		memset (dt, 0, sizeof *dt);
-		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
+		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
+				      VCPU_SREG_LDTR))
 			return;
 
 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
-		dt->address = get_desc_base(&desc);
+		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
 	} else
 		ops->get_gdt(ctxt, dt);
 }
@@ -1422,6 +1425,7 @@
 	ulong desc_addr;
 	int ret;
 	u16 dummy;
+	u32 base3 = 0;
 
 	memset(&seg_desc, 0, sizeof seg_desc);
 
@@ -1538,9 +1542,14 @@
 		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
 		if (ret != X86EMUL_CONTINUE)
 			return ret;
+	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+				sizeof(base3), &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
+			return ret;
 	}
 load:
-	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
+	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1575,34 +1584,28 @@
 
 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
 {
-	int rc;
-
 	switch (op->type) {
 	case OP_REG:
 		write_register_operand(op);
 		break;
 	case OP_MEM:
 		if (ctxt->lock_prefix)
-			rc = segmented_cmpxchg(ctxt,
+			return segmented_cmpxchg(ctxt,
+						 op->addr.mem,
+						 &op->orig_val,
+						 &op->val,
+						 op->bytes);
+		else
+			return segmented_write(ctxt,
 					       op->addr.mem,
-					       &op->orig_val,
 					       &op->val,
 					       op->bytes);
-		else
-			rc = segmented_write(ctxt,
-					     op->addr.mem,
-					     &op->val,
-					     op->bytes);
-		if (rc != X86EMUL_CONTINUE)
-			return rc;
 		break;
 	case OP_MEM_STR:
-		rc = segmented_write(ctxt,
-				op->addr.mem,
-				op->data,
-				op->bytes * op->count);
-		if (rc != X86EMUL_CONTINUE)
-			return rc;
+		return segmented_write(ctxt,
+				       op->addr.mem,
+				       op->data,
+				       op->bytes * op->count);
 		break;
 	case OP_XMM:
 		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
@@ -1671,7 +1674,7 @@
 		return rc;
 
 	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
-		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
+		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
 
 	switch(ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
@@ -1754,6 +1757,9 @@
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
+	if (ctxt->modrm_reg == VCPU_SREG_SS)
+		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
+
 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
 	return rc;
 }
@@ -1991,6 +1997,9 @@
 {
 	u64 old = ctxt->dst.orig_val64;
 
+	if (ctxt->dst.bytes == 16)
+		return X86EMUL_UNHANDLEABLE;
+
 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
@@ -2017,6 +2026,7 @@
 {
 	int rc;
 	unsigned long cs;
+	int cpl = ctxt->ops->cpl(ctxt);
 
 	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
@@ -2026,6 +2036,9 @@
 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
+	/* Outer-privilege level return is not implemented */
+	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+		return X86EMUL_UNHANDLEABLE;
 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
 	return rc;
 }
@@ -2044,8 +2057,10 @@
 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
 {
 	/* Save real source value, then compare EAX against destination. */
+	ctxt->dst.orig_val = ctxt->dst.val;
+	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
 	ctxt->src.orig_val = ctxt->src.val;
-	ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
+	ctxt->src.val = ctxt->dst.orig_val;
 	fastop(ctxt, em_cmp);
 
 	if (ctxt->eflags & EFLG_ZF) {
@@ -2055,6 +2070,7 @@
 		/* Failure: write the value we saw to EAX. */
 		ctxt->dst.type = OP_REG;
 		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
+		ctxt->dst.val = ctxt->dst.orig_val;
 	}
 	return X86EMUL_CONTINUE;
 }
@@ -2194,7 +2210,7 @@
 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
 	if (efer & EFER_LMA) {
 #ifdef CONFIG_X86_64
-		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
+		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
 
 		ops->get_msr(ctxt,
 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
@@ -2202,14 +2218,14 @@
 		ctxt->_eip = msr_data;
 
 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
-		ctxt->eflags &= ~(msr_data | EFLG_RF);
+		ctxt->eflags &= ~msr_data;
 #endif
 	} else {
 		/* legacy mode */
 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
 		ctxt->_eip = (u32)msr_data;
 
-		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
 	}
 
 	return X86EMUL_CONTINUE;
@@ -2258,7 +2274,7 @@
 		break;
 	}
 
-	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
 	cs_sel = (u16)msr_data;
 	cs_sel &= ~SELECTOR_RPL_MASK;
 	ss_sel = cs_sel + 8;
@@ -2964,7 +2980,7 @@
 
 static int em_mov(struct x86_emulate_ctxt *ctxt)
 {
-	memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
+	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
 	return X86EMUL_CONTINUE;
 }
 
@@ -3221,7 +3237,8 @@
 
 static int em_smsw(struct x86_emulate_ctxt *ctxt)
 {
-	ctxt->dst.bytes = 2;
+	if (ctxt->dst.type == OP_MEM)
+		ctxt->dst.bytes = 2;
 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
 	return X86EMUL_CONTINUE;
 }
@@ -3496,7 +3513,7 @@
 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
 
 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
-	    (rcx > 3))
+	    ctxt->ops->check_pmc(ctxt, rcx))
 		return emulate_gp(ctxt, 0);
 
 	return X86EMUL_CONTINUE;
@@ -3521,9 +3538,9 @@
 }
 
 #define D(_y) { .flags = (_y) }
-#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
-#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
-		      .check_perm = (_p) }
+#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
+#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
+		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
 #define N    D(NotImpl)
 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
@@ -3532,10 +3549,10 @@
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
 #define II(_f, _e, _i) \
-	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
+	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
 #define IIP(_f, _e, _i, _p) \
-	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
-	  .check_perm = (_p) }
+	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
+	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
 
 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
@@ -3634,8 +3651,8 @@
 };
 
 static const struct group_dual group7 = { {
-	II(Mov | DstMem | Priv,			em_sgdt, sgdt),
-	II(Mov | DstMem | Priv,			em_sidt, sidt),
+	II(Mov | DstMem,			em_sgdt, sgdt),
+	II(Mov | DstMem,			em_sidt, sidt),
 	II(SrcMem | Priv,			em_lgdt, lgdt),
 	II(SrcMem | Priv,			em_lidt, lidt),
 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
@@ -3899,7 +3916,7 @@
 	N, N,
 	N, N, N, N, N, N, N, N,
 	/* 0x40 - 0x4F */
-	X16(D(DstReg | SrcMem | ModRM | Mov)),
+	X16(D(DstReg | SrcMem | ModRM)),
 	/* 0x50 - 0x5F */
 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
 	/* 0x60 - 0x6F */
@@ -4061,12 +4078,12 @@
 	mem_common:
 		*op = ctxt->memop;
 		ctxt->memopp = op;
-		if ((ctxt->d & BitOp) && op == &ctxt->dst)
+		if (ctxt->d & BitOp)
 			fetch_bit_operand(ctxt);
 		op->orig_val = op->val;
 		break;
 	case OpMem64:
-		ctxt->memop.bytes = 8;
+		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
 		goto mem_common;
 	case OpAcc:
 		op->type = OP_REG;
@@ -4150,7 +4167,7 @@
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.mem.ea =
 			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
-		op->addr.mem.seg = seg_override(ctxt);
+		op->addr.mem.seg = ctxt->seg_override;
 		op->val = 0;
 		op->count = 1;
 		break;
@@ -4161,7 +4178,7 @@
 			register_address(ctxt,
 				reg_read(ctxt, VCPU_REGS_RBX) +
 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
-		op->addr.mem.seg = seg_override(ctxt);
+		op->addr.mem.seg = ctxt->seg_override;
 		op->val = 0;
 		break;
 	case OpImmFAddr:
@@ -4208,16 +4225,22 @@
 	int mode = ctxt->mode;
 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
 	bool op_prefix = false;
+	bool has_seg_override = false;
 	struct opcode opcode;
 
 	ctxt->memop.type = OP_NONE;
 	ctxt->memopp = NULL;
 	ctxt->_eip = ctxt->eip;
-	ctxt->fetch.start = ctxt->_eip;
-	ctxt->fetch.end = ctxt->fetch.start + insn_len;
+	ctxt->fetch.ptr = ctxt->fetch.data;
+	ctxt->fetch.end = ctxt->fetch.data + insn_len;
 	ctxt->opcode_len = 1;
 	if (insn_len > 0)
 		memcpy(ctxt->fetch.data, insn, insn_len);
+	else {
+		rc = __do_insn_fetch_bytes(ctxt, 1);
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
+	}
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL:
@@ -4261,11 +4284,13 @@
 		case 0x2e:	/* CS override */
 		case 0x36:	/* SS override */
 		case 0x3e:	/* DS override */
-			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
+			has_seg_override = true;
+			ctxt->seg_override = (ctxt->b >> 3) & 3;
 			break;
 		case 0x64:	/* FS override */
 		case 0x65:	/* GS override */
-			set_seg_override(ctxt, ctxt->b & 7);
+			has_seg_override = true;
+			ctxt->seg_override = ctxt->b & 7;
 			break;
 		case 0x40 ... 0x4f: /* REX */
 			if (mode != X86EMUL_MODE_PROT64)
@@ -4314,6 +4339,13 @@
 	if (ctxt->d & ModRM)
 		ctxt->modrm = insn_fetch(u8, ctxt);
 
+	/* vex-prefix instructions are not implemented */
+	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
+	    (mode == X86EMUL_MODE_PROT64 ||
+	    (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
+		ctxt->d = NotImpl;
+	}
+
 	while (ctxt->d & GroupMask) {
 		switch (ctxt->d & GroupMask) {
 		case Group:
@@ -4356,49 +4388,59 @@
 		ctxt->d |= opcode.flags;
 	}
 
-	ctxt->execute = opcode.u.execute;
-	ctxt->check_perm = opcode.check_perm;
-	ctxt->intercept = opcode.intercept;
-
 	/* Unrecognised? */
-	if (ctxt->d == 0 || (ctxt->d & NotImpl))
+	if (ctxt->d == 0)
 		return EMULATION_FAILED;
 
-	if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-		return EMULATION_FAILED;
+	ctxt->execute = opcode.u.execute;
 
-	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
-		ctxt->op_bytes = 8;
+	if (unlikely(ctxt->d &
+		     (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+		/*
+		 * These are copied unconditionally here, and checked unconditionally
+		 * in x86_emulate_insn.
+		 */
+		ctxt->check_perm = opcode.check_perm;
+		ctxt->intercept = opcode.intercept;
 
-	if (ctxt->d & Op3264) {
-		if (mode == X86EMUL_MODE_PROT64)
+		if (ctxt->d & NotImpl)
+			return EMULATION_FAILED;
+
+		if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
+			return EMULATION_FAILED;
+
+		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
 			ctxt->op_bytes = 8;
-		else
-			ctxt->op_bytes = 4;
-	}
 
-	if (ctxt->d & Sse)
-		ctxt->op_bytes = 16;
-	else if (ctxt->d & Mmx)
-		ctxt->op_bytes = 8;
+		if (ctxt->d & Op3264) {
+			if (mode == X86EMUL_MODE_PROT64)
+				ctxt->op_bytes = 8;
+			else
+				ctxt->op_bytes = 4;
+		}
+
+		if (ctxt->d & Sse)
+			ctxt->op_bytes = 16;
+		else if (ctxt->d & Mmx)
+			ctxt->op_bytes = 8;
+	}
 
 	/* ModRM and SIB bytes. */
 	if (ctxt->d & ModRM) {
 		rc = decode_modrm(ctxt, &ctxt->memop);
-		if (!ctxt->has_seg_override)
-			set_seg_override(ctxt, ctxt->modrm_seg);
+		if (!has_seg_override) {
+			has_seg_override = true;
+			ctxt->seg_override = ctxt->modrm_seg;
+		}
 	} else if (ctxt->d & MemAbs)
 		rc = decode_abs(ctxt, &ctxt->memop);
 	if (rc != X86EMUL_CONTINUE)
 		goto done;
 
-	if (!ctxt->has_seg_override)
-		set_seg_override(ctxt, VCPU_SREG_DS);
+	if (!has_seg_override)
+		ctxt->seg_override = VCPU_SREG_DS;
 
-	ctxt->memop.addr.mem.seg = seg_override(ctxt);
-
-	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
-		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
+	ctxt->memop.addr.mem.seg = ctxt->seg_override;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -4420,7 +4462,7 @@
 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
 done:
-	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
+	if (ctxt->rip_relative)
 		ctxt->memopp->addr.mem.ea += ctxt->_eip;
 
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -4495,6 +4537,16 @@
 	return X86EMUL_CONTINUE;
 }
 
+void init_decode_cache(struct x86_emulate_ctxt *ctxt)
+{
+	memset(&ctxt->rip_relative, 0,
+	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
+
+	ctxt->io_read.pos = 0;
+	ctxt->io_read.end = 0;
+	ctxt->mem_read.end = 0;
+}
+
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
 	const struct x86_emulate_ops *ops = ctxt->ops;
@@ -4503,12 +4555,6 @@
 
 	ctxt->mem_read.pos = 0;
 
-	if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
-			(ctxt->d & Undefined)) {
-		rc = emulate_ud(ctxt);
-		goto done;
-	}
-
 	/* LOCK prefix is allowed only with some instructions */
 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
 		rc = emulate_ud(ctxt);
@@ -4520,70 +4566,83 @@
 		goto done;
 	}
 
-	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
-	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
-		rc = emulate_ud(ctxt);
-		goto done;
-	}
-
-	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
-		rc = emulate_nm(ctxt);
-		goto done;
-	}
-
-	if (ctxt->d & Mmx) {
-		rc = flush_pending_x87_faults(ctxt);
-		if (rc != X86EMUL_CONTINUE)
+	if (unlikely(ctxt->d &
+		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
+		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
+				(ctxt->d & Undefined)) {
+			rc = emulate_ud(ctxt);
 			goto done;
-		/*
-		 * Now that we know the fpu is exception safe, we can fetch
-		 * operands from it.
-		 */
-		fetch_possible_mmx_operand(ctxt, &ctxt->src);
-		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
-		if (!(ctxt->d & Mov))
-			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
-	}
+		}
 
-	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
-		rc = emulator_check_intercept(ctxt, ctxt->intercept,
-					      X86_ICPT_PRE_EXCEPT);
-		if (rc != X86EMUL_CONTINUE)
+		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+			rc = emulate_ud(ctxt);
 			goto done;
-	}
+		}
 
-	/* Privileged instruction can be executed only in CPL=0 */
-	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
-		rc = emulate_gp(ctxt, 0);
-		goto done;
-	}
-
-	/* Instruction can only be executed in protected mode */
-	if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
-		rc = emulate_ud(ctxt);
-		goto done;
-	}
-
-	/* Do instruction specific permission checks */
-	if (ctxt->check_perm) {
-		rc = ctxt->check_perm(ctxt);
-		if (rc != X86EMUL_CONTINUE)
+		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+			rc = emulate_nm(ctxt);
 			goto done;
-	}
+		}
 
-	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
-		rc = emulator_check_intercept(ctxt, ctxt->intercept,
-					      X86_ICPT_POST_EXCEPT);
-		if (rc != X86EMUL_CONTINUE)
-			goto done;
-	}
+		if (ctxt->d & Mmx) {
+			rc = flush_pending_x87_faults(ctxt);
+			if (rc != X86EMUL_CONTINUE)
+				goto done;
+			/*
+			 * Now that we know the fpu is exception safe, we can fetch
+			 * operands from it.
+			 */
+			fetch_possible_mmx_operand(ctxt, &ctxt->src);
+			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+			if (!(ctxt->d & Mov))
+				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+		}
 
-	if (ctxt->rep_prefix && (ctxt->d & String)) {
-		/* All REP prefixes have the same first termination condition */
-		if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
-			ctxt->eip = ctxt->_eip;
+		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
+			rc = emulator_check_intercept(ctxt, ctxt->intercept,
+						      X86_ICPT_PRE_EXCEPT);
+			if (rc != X86EMUL_CONTINUE)
+				goto done;
+		}
+
+		/* Privileged instruction can be executed only in CPL=0 */
+		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
+			if (ctxt->d & PrivUD)
+				rc = emulate_ud(ctxt);
+			else
+				rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
+
+		/* Instruction can only be executed in protected mode */
+		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
+			rc = emulate_ud(ctxt);
+			goto done;
+		}
+
+		/* Do instruction specific permission checks */
+		if (ctxt->d & CheckPerm) {
+			rc = ctxt->check_perm(ctxt);
+			if (rc != X86EMUL_CONTINUE)
+				goto done;
+		}
+
+		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
+			rc = emulator_check_intercept(ctxt, ctxt->intercept,
+						      X86_ICPT_POST_EXCEPT);
+			if (rc != X86EMUL_CONTINUE)
+				goto done;
+		}
+
+		if (ctxt->rep_prefix && (ctxt->d & String)) {
+			/* All REP prefixes have the same first termination condition */
+			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
+				ctxt->eip = ctxt->_eip;
+				ctxt->eflags &= ~EFLG_RF;
+				goto done;
+			}
+		}
 	}
 
 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
@@ -4616,13 +4675,18 @@
 
 special_insn:
 
-	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
 					      X86_ICPT_POST_MEMACCESS);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
 
+	if (ctxt->rep_prefix && (ctxt->d & String))
+		ctxt->eflags |= EFLG_RF;
+	else
+		ctxt->eflags &= ~EFLG_RF;
+
 	if (ctxt->execute) {
 		if (ctxt->d & Fastop) {
 			void (*fop)(struct fastop *) = (void *)ctxt->execute;
@@ -4657,8 +4721,9 @@
 		break;
 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
-			break;
-		rc = em_xchg(ctxt);
+			ctxt->dst.type = OP_NONE;
+		else
+			rc = em_xchg(ctxt);
 		break;
 	case 0x98: /* cbw/cwde/cdqe */
 		switch (ctxt->op_bytes) {
@@ -4709,17 +4774,17 @@
 		goto done;
 
 writeback:
-	if (!(ctxt->d & NoWrite)) {
-		rc = writeback(ctxt, &ctxt->dst);
-		if (rc != X86EMUL_CONTINUE)
-			goto done;
-	}
 	if (ctxt->d & SrcWrite) {
 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
 		rc = writeback(ctxt, &ctxt->src);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
+	if (!(ctxt->d & NoWrite)) {
+		rc = writeback(ctxt, &ctxt->dst);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
 
 	/*
 	 * restore dst type in case the decoding will be reused
@@ -4761,6 +4826,7 @@
 			}
 			goto done; /* skip rip writeback */
 		}
+		ctxt->eflags &= ~EFLG_RF;
 	}
 
 	ctxt->eip = ctxt->_eip;
@@ -4793,8 +4859,10 @@
 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
 		break;
 	case 0x40 ... 0x4f:	/* cmov */
-		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
-		if (!test_cc(ctxt->b, ctxt->eflags))
+		if (test_cc(ctxt->b, ctxt->eflags))
+			ctxt->dst.val = ctxt->src.val;
+		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
+			 ctxt->op_bytes != 4)
 			ctxt->dst.type = OP_NONE; /* no writeback */
 		break;
 	case 0x80 ... 0x8f: /* jnz rel, etc*/
@@ -4818,8 +4886,8 @@
 		break;
 	case 0xc3:		/* movnti */
 		ctxt->dst.bytes = ctxt->op_bytes;
-		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
-							(u64) ctxt->src.val;
+		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
+							(u32) ctxt->src.val;
 		break;
 	default:
 		goto cannot_emulate;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0069118..3855103 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1451,7 +1451,7 @@
 	vcpu->arch.apic_arb_prio = 0;
 	vcpu->arch.apic_attention = 0;
 
-	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
+	apic_debug("%s: vcpu=%p, id=%d, base_msr="
 		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
 		   vcpu, kvm_apic_id(apic),
 		   vcpu->arch.apic_base, apic->base_address);
@@ -1895,7 +1895,7 @@
 		/* evaluate pending_events before reading the vector */
 		smp_rmb();
 		sipi_vector = apic->sipi_vector;
-		pr_debug("vcpu %d received sipi with vector # %x\n",
+		apic_debug("vcpu %d received sipi with vector # %x\n",
 			 vcpu->vcpu_id, sipi_vector);
 		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 9d2e0ff..5aaf356 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -22,7 +22,7 @@
 	__entry->unsync = sp->unsync;
 
 #define KVM_MMU_PAGE_PRINTK() ({				        \
-	const char *ret = p->buffer + p->len;				\
+	const u32 saved_len = p->len;					\
 	static const char *access_str[] = {			        \
 		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
 	};							        \
@@ -41,7 +41,7 @@
 			 role.nxe ? "" : "!",				\
 			 __entry->root_count,				\
 			 __entry->unsync ? "unsync" : "sync", 0);	\
-	ret;								\
+	p->buffer + saved_len;						\
 		})
 
 #define kvm_mmu_trace_pferr_flags       \
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index cbecaa9..3dd6acc 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -428,6 +428,15 @@
 	return 1;
 }
 
+int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	bool fixed = pmc & (1u << 30);
+	pmc &= ~(3u << 30);
+	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
+		(fixed && pmc >= pmu->nr_arch_fixed_counters);
+}
+
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b5e994a..ddf7427 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -486,14 +486,14 @@
 	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
 }
 
-static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ret = 0;
 
 	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
-	return ret & mask;
+		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
+	return ret;
 }
 
 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
@@ -1415,7 +1415,16 @@
 	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
 	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
-	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
+
+	/*
+	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
+	 * However, the SVM spec states that the G bit is not observed by the
+	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
+	 * So let's synthesize a legal G bit for all segments; this helps
+	 * when running KVM nested. It also helps cross-vendor migration, because
+	 * Intel's vmentry has a check on the 'G' bit.
+	 */
+	var->g = s->limit > 0xfffff;
 
 	/*
 	 * AMD's VMCB does not have an explicit unusable field, so emulate it
@@ -1424,14 +1433,6 @@
 	var->unusable = !var->present || (var->type == 0);
 
 	switch (seg) {
-	case VCPU_SREG_CS:
-		/*
-		 * SVM always stores 0 for the 'G' bit in the CS selector in
-		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-		 * Intel's VMENTRY has a check on the 'G' bit.
-		 */
-		var->g = s->limit > 0xfffff;
-		break;
 	case VCPU_SREG_TR:
 		/*
 		 * Work around a bug where the busy flag in the tr selector
@@ -2116,22 +2117,27 @@
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 {
-	unsigned port;
-	u8 val, bit;
+	unsigned port, size, iopm_len;
+	u16 val, mask;
+	u8 start_bit;
 	u64 gpa;
 
 	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
 		return NESTED_EXIT_HOST;
 
 	port = svm->vmcb->control.exit_info_1 >> 16;
+	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
+		SVM_IOIO_SIZE_SHIFT;
 	gpa  = svm->nested.vmcb_iopm + (port / 8);
-	bit  = port % 8;
-	val  = 0;
+	start_bit = port % 8;
+	iopm_len = (start_bit + size > 8) ? 2 : 1;
+	mask = (0xf >> (4 - size)) << start_bit;
+	val = 0;
 
-	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
-		val &= (1 << bit);
+	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+		return NESTED_EXIT_DONE;
 
-	return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
+	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
@@ -4205,7 +4211,8 @@
 		if (info->intercept == x86_intercept_cr_write)
 			icpt_info.exit_code += info->modrm_reg;
 
-		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
+		    info->intercept == x86_intercept_clts)
 			break;
 
 		intercept = svm->nested.intercept;
@@ -4250,14 +4257,14 @@
 		u64 exit_info;
 		u32 bytes;
 
-		exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
-
 		if (info->intercept == x86_intercept_in ||
 		    info->intercept == x86_intercept_ins) {
-			exit_info |= SVM_IOIO_TYPE_MASK;
-			bytes = info->src_bytes;
-		} else {
+			exit_info = ((info->src_val & 0xffff) << 16) |
+				SVM_IOIO_TYPE_MASK;
 			bytes = info->dst_bytes;
+		} else {
+			exit_info = (info->dst_val & 0xffff) << 16;
+			bytes = info->src_bytes;
 		}
 
 		if (info->intercept == x86_intercept_outs ||
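
The reworked nested_svm_intercept_ioio() reflects the SVM IO permission map
layout: one bit per port, so an access of `size` bytes starting at `port`
covers `size` consecutive bits, which can straddle a byte boundary; in that
case two bytes of the map must be read. A worked userspace sketch of the mask
arithmetic (the ports and sizes are chosen purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	static void iopm_lookup(unsigned port, unsigned size)
	{
		unsigned start_bit = port % 8;
		unsigned iopm_len = (start_bit + size > 8) ? 2 : 1;
		uint16_t mask = (0xf >> (4 - size)) << start_bit;

		printf("port %#x size %u: read %u byte(s), mask %#x\n",
		       port, size, iopm_len, mask);
	}

	int main(void)
	{
		iopm_lookup(0x3f9, 2);	/* bits 1-2 of one byte, mask 0x6 */
		iopm_lookup(0x3ff, 2);	/* straddles: 2 bytes, mask 0x180 */
		return 0;
	}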
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 33574c9..e850a7d 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -721,10 +721,10 @@
 		),
 
 	TP_fast_assign(
-		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
 		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
-		__entry->len = vcpu->arch.emulate_ctxt._eip
-			       - vcpu->arch.emulate_ctxt.fetch.start;
+		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
+			       - vcpu->arch.emulate_ctxt.fetch.data;
+		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
 		memcpy(__entry->insn,
 		       vcpu->arch.emulate_ctxt.fetch.data,
 		       15);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 801332e..e618f34 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -383,6 +383,9 @@
 
 	struct hrtimer preemption_timer;
 	bool preemption_timer_expired;
+
+	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
+	u64 vmcs01_debugctl;
 };
 
 #define POSTED_INTR_ON  0
@@ -740,7 +743,6 @@
 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static bool vmx_mpx_supported(void);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -820,7 +822,6 @@
 #endif
 	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 };
-#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
 static inline bool is_page_fault(u32 intr_info)
 {
@@ -1940,7 +1941,7 @@
 	vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
 	int ret = 0;
@@ -1950,7 +1951,7 @@
 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
 		ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
-	return ret & mask;
+	return ret;
 }
 
 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
@@ -2239,10 +2240,13 @@
  * or other means.
  */
 static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
+static u32 nested_vmx_true_procbased_ctls_low;
 static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
 static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
+static u32 nested_vmx_true_exit_ctls_low;
 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
+static u32 nested_vmx_true_entry_ctls_low;
 static u32 nested_vmx_misc_low, nested_vmx_misc_high;
 static u32 nested_vmx_ept_caps;
 static __init void nested_vmx_setup_ctls_msrs(void)
@@ -2265,21 +2269,13 @@
 	/* pin-based controls */
 	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
 	      nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
-	/*
-	 * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
-	 * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
-	 */
 	nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
 	nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
 		PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS;
 	nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
 		PIN_BASED_VMX_PREEMPTION_TIMER;
 
-	/*
-	 * Exit controls
-	 * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
-	 * 17 must be 1.
-	 */
+	/* exit controls */
 	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
 		nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
 	nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
@@ -2296,10 +2292,13 @@
 	if (vmx_mpx_supported())
 		nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
+	/* We support free control of debug control saving. */
+	nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low &
+		~VM_EXIT_SAVE_DEBUG_CONTROLS;
+
 	/* entry controls */
 	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
 		nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
-	/* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
 	nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
 	nested_vmx_entry_ctls_high &=
 #ifdef CONFIG_X86_64
@@ -2311,10 +2310,14 @@
 	if (vmx_mpx_supported())
 		nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
+	/* We support free control of debug control loading. */
+	nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low &
+		~VM_ENTRY_LOAD_DEBUG_CONTROLS;
+
 	/* cpu-based controls */
 	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
 		nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
-	nested_vmx_procbased_ctls_low = 0;
+	nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
 	nested_vmx_procbased_ctls_high &=
 		CPU_BASED_VIRTUAL_INTR_PENDING |
 		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
@@ -2335,7 +2338,12 @@
 	 * can use it to avoid exits to L1 - even when L0 runs L2
 	 * without MSR bitmaps.
 	 */
-	nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
+	nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+		CPU_BASED_USE_MSR_BITMAPS;
+
+	/* We support free control of CR3 access interception. */
+	nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low &
+		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
 
 	/* secondary cpu-based controls */
 	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
@@ -2394,7 +2402,7 @@
 		 * guest, and the VMCS structure we give it - not about the
 		 * VMX support of the underlying hardware.
 		 */
-		*pdata = VMCS12_REVISION |
+		*pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
 			   ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
 			   (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
 		break;
@@ -2404,16 +2412,25 @@
 					nested_vmx_pinbased_ctls_high);
 		break;
 	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+		*pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low,
+					nested_vmx_procbased_ctls_high);
+		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS:
 		*pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
 					nested_vmx_procbased_ctls_high);
 		break;
 	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+		*pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low,
+					nested_vmx_exit_ctls_high);
+		break;
 	case MSR_IA32_VMX_EXIT_CTLS:
 		*pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
 					nested_vmx_exit_ctls_high);
 		break;
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+		*pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low,
+					nested_vmx_entry_ctls_high);
+		break;
 	case MSR_IA32_VMX_ENTRY_CTLS:
 		*pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
 					nested_vmx_entry_ctls_high);
@@ -2442,7 +2459,7 @@
 		*pdata = -1ULL;
 		break;
 	case MSR_IA32_VMX_VMCS_ENUM:
-		*pdata = 0x1f;
+		*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
 		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS2:
 		*pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
@@ -3653,7 +3670,7 @@
 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
 
 out:
-	vmx->emulation_required |= emulation_required(vcpu);
+	vmx->emulation_required = emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -4422,7 +4439,7 @@
 		vmx->vcpu.arch.pat = host_pat;
 	}
 
-	for (i = 0; i < NR_VMX_MSR; ++i) {
+	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
 		u32 index = vmx_msr_index[i];
 		u32 data_low, data_high;
 		int j = vmx->nmsrs;
@@ -4873,7 +4890,7 @@
 		if (!(vcpu->guest_debug &
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
 			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= dr6;
+			vcpu->arch.dr6 |= dr6 | DR6_RTM;
 			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
 				skip_emulated_instruction(vcpu);
 
@@ -5039,7 +5056,7 @@
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
-		val = kvm_register_read(vcpu, reg);
+		val = kvm_register_readl(vcpu, reg);
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
@@ -5056,7 +5073,7 @@
 			return 1;
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
-				u8 cr8 = kvm_register_read(vcpu, reg);
+				u8 cr8 = (u8)val;
 				err = kvm_set_cr8(vcpu, cr8);
 				kvm_complete_insn_gp(vcpu, err);
 				if (irqchip_in_kernel(vcpu->kvm))
@@ -5132,7 +5149,7 @@
 			return 0;
 		} else {
 			vcpu->arch.dr7 &= ~DR7_GD;
-			vcpu->arch.dr6 |= DR6_BD;
+			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
 			vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
 			kvm_queue_exception(vcpu, DB_VECTOR);
 			return 1;
@@ -5165,7 +5182,7 @@
 			return 1;
 		kvm_register_write(vcpu, reg, val);
 	} else
-		if (kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)))
+		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
 			return 1;
 
 	skip_emulated_instruction(vcpu);
@@ -5621,7 +5638,7 @@
 	cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
 
-	while (!guest_state_valid(vcpu) && count-- != 0) {
+	while (vmx->emulation_required && count-- != 0) {
 		if (intr_window_requested && vmx_interrupt_allowed(vcpu))
 			return handle_interrupt_window(&vmx->vcpu);
 
@@ -5655,7 +5672,6 @@
 			schedule();
 	}
 
-	vmx->emulation_required = emulation_required(vcpu);
 out:
 	return ret;
 }
@@ -5754,22 +5770,27 @@
 
 /*
  * Free all VMCSs saved for this vcpu, except the one pointed by
- * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
- * currently used, if running L2), and vmcs01 when running L2.
+ * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
+ * must be &vmx->vmcs01.
  */
 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
 {
 	struct vmcs02_list *item, *n;
+
+	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
 	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
-		if (vmx->loaded_vmcs != &item->vmcs02)
-			free_loaded_vmcs(&item->vmcs02);
+		/*
+		 * Something will leak if the above WARN triggers.  Better than
+		 * a use-after-free.
+		 */
+		if (vmx->loaded_vmcs == &item->vmcs02)
+			continue;
+
+		free_loaded_vmcs(&item->vmcs02);
 		list_del(&item->list);
 		kfree(item);
+		vmx->nested.vmcs02_num--;
 	}
-	vmx->nested.vmcs02_num = 0;
-
-	if (vmx->loaded_vmcs != &vmx->vmcs01)
-		free_loaded_vmcs(&vmx->vmcs01);
 }
 
 /*
@@ -5918,7 +5939,7 @@
 		 * which replaces physical address width with 32
 		 *
 		 */
-		if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failInvalid(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
@@ -5936,7 +5957,7 @@
 		vmx->nested.vmxon_ptr = vmptr;
 		break;
 	case EXIT_REASON_VMCLEAR:
-		if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMCLEAR_INVALID_ADDRESS);
 			skip_emulated_instruction(vcpu);
@@ -5951,7 +5972,7 @@
 		}
 		break;
 	case EXIT_REASON_VMPTRLD:
-		if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMPTRLD_INVALID_ADDRESS);
 			skip_emulated_instruction(vcpu);
@@ -6086,20 +6107,27 @@
 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
 {
 	u32 exec_control;
+	if (vmx->nested.current_vmptr == -1ull)
+		return;
+
+	/* current_vmptr and current_vmcs12 are always set/reset together */
+	if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
+		return;
+
 	if (enable_shadow_vmcs) {
-		if (vmx->nested.current_vmcs12 != NULL) {
-			/* copy to memory all shadowed fields in case
-			   they were modified */
-			copy_shadow_to_vmcs12(vmx);
-			vmx->nested.sync_shadow_vmcs = false;
-			exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-			exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
-			vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
-			vmcs_write64(VMCS_LINK_POINTER, -1ull);
-		}
+		/* copy to memory all shadowed fields in case
+		   they were modified */
+		copy_shadow_to_vmcs12(vmx);
+		vmx->nested.sync_shadow_vmcs = false;
+		exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+		exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
+		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+		vmcs_write64(VMCS_LINK_POINTER, -1ull);
 	}
 	kunmap(vmx->nested.current_vmcs12_page);
 	nested_release_page(vmx->nested.current_vmcs12_page);
+	vmx->nested.current_vmptr = -1ull;
+	vmx->nested.current_vmcs12 = NULL;
 }
 
 /*
@@ -6110,12 +6138,9 @@
 {
 	if (!vmx->nested.vmxon)
 		return;
+
 	vmx->nested.vmxon = false;
-	if (vmx->nested.current_vmptr != -1ull) {
-		nested_release_vmcs12(vmx);
-		vmx->nested.current_vmptr = -1ull;
-		vmx->nested.current_vmcs12 = NULL;
-	}
+	nested_release_vmcs12(vmx);
 	if (enable_shadow_vmcs)
 		free_vmcs(vmx->nested.current_shadow_vmcs);
 	/* Unpin physical memory we referred to in current vmcs02 */
@@ -6152,11 +6177,8 @@
 	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
 		return 1;
 
-	if (vmptr == vmx->nested.current_vmptr) {
+	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
-		vmx->nested.current_vmptr = -1ull;
-		vmx->nested.current_vmcs12 = NULL;
-	}
 
 	page = nested_get_page(vcpu, vmptr);
 	if (page == NULL) {
@@ -6384,7 +6406,7 @@
 		return 1;
 
 	/* Decode instruction info and find the field to read */
-	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 	/* Read the field, zero-extended to a u64 field_value */
 	if (!vmcs12_read_any(vcpu, field, &field_value)) {
 		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
@@ -6397,7 +6419,7 @@
 	 * on the guest's mode (32 or 64 bit), not on the given field's length.
 	 */
 	if (vmx_instruction_info & (1u << 10)) {
-		kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
 			field_value);
 	} else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -6434,21 +6456,21 @@
 		return 1;
 
 	if (vmx_instruction_info & (1u << 10))
-		field_value = kvm_register_read(vcpu,
+		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
 	else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
 				vmx_instruction_info, &gva))
 			return 1;
 		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
-			   &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
+			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
 			kvm_inject_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
 
 
-	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 	if (vmcs_field_readonly(field)) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
@@ -6498,9 +6520,8 @@
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
-		if (vmx->nested.current_vmptr != -1ull)
-			nested_release_vmcs12(vmx);
 
+		nested_release_vmcs12(vmx);
 		vmx->nested.current_vmptr = vmptr;
 		vmx->nested.current_vmcs12 = new_vmcs12;
 		vmx->nested.current_vmcs12_page = page;
@@ -6571,7 +6592,7 @@
 	}
 
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
 	types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
@@ -6751,7 +6772,7 @@
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
 	int reg = (exit_qualification >> 8) & 15;
-	unsigned long val = kvm_register_read(vcpu, reg);
+	unsigned long val = kvm_register_readl(vcpu, reg);
 
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
@@ -7112,7 +7133,26 @@
 	if (max_irr == -1)
 		return;
 
-	vmx_set_rvi(max_irr);
+	/*
+	 * If a vmexit is needed, vmx_check_nested_events handles it.
+	 */
+	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+		return;
+
+	if (!is_guest_mode(vcpu)) {
+		vmx_set_rvi(max_irr);
+		return;
+	}
+
+	/*
+	 * Fall back to pre-APICv interrupt injection since L2
+	 * is run without virtual interrupt delivery.
+	 */
+	if (!kvm_event_needs_reinjection(vcpu) &&
+	    vmx_interrupt_allowed(vcpu)) {
+		kvm_queue_interrupt(vcpu, max_irr, false);
+		vmx_inject_irq(vcpu);
+	}
 }
 
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -7520,13 +7560,31 @@
 	vmx_complete_interrupts(vmx);
 }
 
+static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int cpu;
+
+	if (vmx->loaded_vmcs == &vmx->vmcs01)
+		return;
+
+	cpu = get_cpu();
+	vmx->loaded_vmcs = &vmx->vmcs01;
+	vmx_vcpu_put(vcpu);
+	vmx_vcpu_load(vcpu, cpu);
+	vcpu->cpu = cpu;
+	put_cpu();
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	free_vpid(vmx);
-	free_loaded_vmcs(vmx->loaded_vmcs);
+	leave_guest_mode(vcpu);
+	vmx_load_vmcs01(vcpu);
 	free_nested(vmx);
+	free_loaded_vmcs(vmx->loaded_vmcs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -7548,6 +7606,9 @@
 		goto free_vcpu;
 
 	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
+		     > PAGE_SIZE);
+
 	err = -ENOMEM;
 	if (!vmx->guest_msrs) {
 		goto uninit_vcpu;
@@ -7836,7 +7897,13 @@
 	vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
 	vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
 
-	vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+	} else {
+		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 		vmcs12->vm_entry_intr_info_field);
 	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
@@ -7846,7 +7913,6 @@
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
 		vmcs12->guest_interruptibility_info);
 	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
-	kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
 	vmx_set_rflags(vcpu, vmcs12->guest_rflags);
 	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
 		vmcs12->guest_pending_dbg_exceptions);
@@ -8113,14 +8179,14 @@
 	}
 
 	if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
-			!IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
+			!PAGE_ALIGNED(vmcs12->msr_bitmap)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
 	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-			!IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
+			!PAGE_ALIGNED(vmcs12->apic_access_addr)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
@@ -8136,15 +8202,18 @@
 	}
 
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
-	      nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
+				nested_vmx_true_procbased_ctls_low,
+				nested_vmx_procbased_ctls_high) ||
 	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
 	      nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
 	    !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
 	      nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
 	    !vmx_control_verify(vmcs12->vm_exit_controls,
-	      nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
+				nested_vmx_true_exit_ctls_low,
+				nested_vmx_exit_ctls_high) ||
 	    !vmx_control_verify(vmcs12->vm_entry_controls,
-	      nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
+				nested_vmx_true_entry_ctls_low,
+				nested_vmx_entry_ctls_high))
 	{
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
@@ -8221,6 +8290,9 @@
 
 	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
 
+	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
+		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+
 	cpu = get_cpu();
 	vmx->loaded_vmcs = vmcs02;
 	vmx_vcpu_put(vcpu);
@@ -8398,7 +8470,6 @@
 	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
 	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
 
-	kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
 	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
 	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
 	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
@@ -8477,9 +8548,13 @@
 		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
 		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
 
+	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
+		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
+		vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+	}
+
 	/* TODO: These cannot have changed unless we have MSR bitmaps and
 	 * the relevant bit asks not to trap the change */
-	vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
 	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
 		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
 	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
@@ -8670,7 +8745,6 @@
 			      unsigned long exit_qualification)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int cpu;
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
 	/* trying to cancel vmlaunch/vmresume is a bug */
@@ -8695,12 +8769,7 @@
 				       vmcs12->vm_exit_intr_error_code,
 				       KVM_ISA_VMX);
 
-	cpu = get_cpu();
-	vmx->loaded_vmcs = &vmx->vmcs01;
-	vmx_vcpu_put(vcpu);
-	vmx_vcpu_load(vcpu, cpu);
-	vcpu->cpu = cpu;
-	put_cpu();
+	vmx_load_vmcs01(vcpu);
 
 	vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
 	vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
@@ -8890,7 +8959,7 @@
 
 	rdmsrl_safe(MSR_EFER, &host_efer);
 
-	for (i = 0; i < NR_VMX_MSR; ++i)
+	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
 		kvm_define_shared_msr(i, vmx_msr_index[i]);
 
 	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
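
The new nested_vmx_true_*_ctls_low values shrink the "must be 1" set
advertised to L1 through the TRUE control MSRs, and the vmcs12 checks are
switched to those relaxed masks. The underlying rule is the usual
allowed-0/allowed-1 pairing; a minimal sketch of it (this mirrors the intent
of vmx_control_verify(), not necessarily its exact body, and the bit values
below are invented for the example):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Valid iff every allowed-0 bit (low) is set and only
	 * allowed-1 bits (high) are used. */
	static bool control_ok(uint32_t ctl, uint32_t low, uint32_t high)
	{
		return (ctl & low) == low && (ctl & ~high) == 0;
	}

	int main(void)
	{
		uint32_t low = 0x16;		/* legacy must-be-1 bits */
		uint32_t true_low = low & ~0x4;	/* pretend bit 2 is
						   VM_ENTRY_LOAD_DEBUG_CONTROLS */
		uint32_t high = 0xffff;

		/* Clearing that bit fails the legacy mask but passes
		 * the "true" one. */
		printf("%d %d\n", control_ok(0x12, low, high),
				  control_ok(0x12, true_low, high)); /* 0 1 */
		return 0;
	}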
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ef432f8..b86d329 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -87,6 +87,7 @@
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -211,6 +212,7 @@
 
 void kvm_define_shared_msr(unsigned slot, u32 msr)
 {
+	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
 	if (slot >= shared_msrs_global.nr)
 		shared_msrs_global.nr = slot + 1;
 	shared_msrs_global.msrs[slot] = msr;
@@ -310,6 +312,31 @@
 	return EXCPT_BENIGN;
 }
 
+#define EXCPT_FAULT		0
+#define EXCPT_TRAP		1
+#define EXCPT_ABORT		2
+#define EXCPT_INTERRUPT		3
+
+static int exception_type(int vector)
+{
+	unsigned int mask;
+
+	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
+		return EXCPT_INTERRUPT;
+
+	mask = 1 << vector;
+
+	/* #DB is a trap, as instruction watchpoints are handled elsewhere */
+	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
+		return EXCPT_TRAP;
+
+	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
+		return EXCPT_ABORT;
+
+	/* Reserved exceptions will result in a fault */
+	return EXCPT_FAULT;
+}
+
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		unsigned nr, bool has_error, u32 error_code,
 		bool reinject)
@@ -758,6 +785,15 @@
 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
 }
 
+static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
+{
+	u64 fixed = DR6_FIXED_1;
+
+	if (!guest_cpuid_has_rtm(vcpu))
+		fixed |= DR6_RTM;
+	return fixed;
+}
+
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
 	switch (dr) {
@@ -773,7 +809,7 @@
 	case 6:
 		if (val & 0xffffffff00000000ULL)
 			return -1; /* #GP */
-		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
 		kvm_update_dr6(vcpu);
 		break;
 	case 5:
@@ -1215,6 +1251,7 @@
 	unsigned long flags;
 	s64 usdiff;
 	bool matched;
+	bool already_matched;
 	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
@@ -1279,6 +1316,7 @@
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
 		matched = true;
+		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
 	} else {
 		/*
 		 * We split periods of matched TSC writes into generations.
@@ -1294,7 +1332,7 @@
 		kvm->arch.cur_tsc_write = data;
 		kvm->arch.cur_tsc_offset = offset;
 		matched = false;
-		pr_debug("kvm: new tsc generation %u, clock %llu\n",
+		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
 			 kvm->arch.cur_tsc_generation, data);
 	}
 
@@ -1319,10 +1357,11 @@
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
 	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
-	if (matched)
-		kvm->arch.nr_vcpus_matched_tsc++;
-	else
+	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
+	} else if (!already_matched) {
+		kvm->arch.nr_vcpus_matched_tsc++;
+	}
 
 	kvm_track_tsc_matching(vcpu);
 	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
@@ -2032,6 +2071,7 @@
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
 		data &= ~(u64)0x8;	/* ignore TLB cache disable */
+		data &= ~(u64)0x40000;  /* ignore MC status write enable */
 		if (data != 0) {
 			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
 				    data);
@@ -2974,9 +3014,7 @@
 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
 	events->interrupt.soft = 0;
-	events->interrupt.shadow =
-		kvm_x86_ops->get_interrupt_shadow(vcpu,
-			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
+	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
 
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
@@ -4082,7 +4120,8 @@
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
-		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
+		ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
+					  offset, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
 			goto out;
@@ -4103,10 +4142,24 @@
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	unsigned offset;
+	int ret;
 
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-					  access | PFERR_FETCH_MASK,
-					  exception);
+	/* Inline kvm_read_guest_virt_helper for speed.  */
+	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
+						    exception);
+	if (unlikely(gpa == UNMAPPED_GVA))
+		return X86EMUL_PROPAGATE_FAULT;
+
+	offset = addr & (PAGE_SIZE-1);
+	if (WARN_ON(offset + bytes > PAGE_SIZE))
+		bytes = (unsigned)PAGE_SIZE - offset;
+	ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
+				  offset, bytes);
+	if (unlikely(ret < 0))
+		return X86EMUL_IO_NEEDED;
+
+	return X86EMUL_CONTINUE;
 }
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
@@ -4730,7 +4783,6 @@
 	if (desc->g)
 		var.limit = (var.limit << 12) | 0xfff;
 	var.type = desc->type;
-	var.present = desc->p;
 	var.dpl = desc->dpl;
 	var.db = desc->d;
 	var.s = desc->s;
@@ -4762,6 +4814,12 @@
 	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
+static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
+			      u32 pmc)
+{
+	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
+}
+
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
 			     u32 pmc, u64 *pdata)
 {
@@ -4838,6 +4896,7 @@
 	.set_dr              = emulator_set_dr,
 	.set_msr             = emulator_set_msr,
 	.get_msr             = emulator_get_msr,
+	.check_pmc	     = emulator_check_pmc,
 	.read_pmc            = emulator_read_pmc,
 	.halt                = emulator_halt,
 	.wbinvd              = emulator_wbinvd,
@@ -4850,7 +4909,7 @@
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 {
-	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
+	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
 	/*
 	 * an sti; sti; sequence only disables interrupts for the first
 	 * instruction. So, if the last instruction, be it emulated or
@@ -4858,8 +4917,13 @@
 	 * means that the last instruction is an sti. We should not
 	 * leave the flag on in this case. The same goes for mov ss
 	 */
-	if (!(int_shadow & mask))
+	if (int_shadow & mask)
+		mask = 0;
+	if (unlikely(int_shadow || mask)) {
 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+		if (!mask)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
+	}
 }
 
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
@@ -4874,19 +4938,6 @@
 		kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
-static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
-{
-	memset(&ctxt->opcode_len, 0,
-	       (void *)&ctxt->_regs - (void *)&ctxt->opcode_len);
-
-	ctxt->fetch.start = 0;
-	ctxt->fetch.end = 0;
-	ctxt->io_read.pos = 0;
-	ctxt->io_read.end = 0;
-	ctxt->mem_read.pos = 0;
-	ctxt->mem_read.end = 0;
-}
-
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
@@ -5085,23 +5136,22 @@
 	return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
+static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
 {
 	struct kvm_run *kvm_run = vcpu->run;
 
 	/*
-	 * Use the "raw" value to see if TF was passed to the processor.
-	 * Note that the new value of the flags has not been saved yet.
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
 	 *
 	 * This is correct even for TF set by the guest, because "the
 	 * processor will not generate this exception after the instruction
 	 * that sets the TF flag".
 	 */
-	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
-
 	if (unlikely(rflags & X86_EFLAGS_TF)) {
 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
+			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
+						  DR6_RTM;
 			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
 			kvm_run->debug.arch.exception = DB_VECTOR;
 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -5114,7 +5164,7 @@
 			 * cleared by the processor".
 			 */
 			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= DR6_BS;
+			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
 			kvm_queue_exception(vcpu, DB_VECTOR);
 		}
 	}
@@ -5133,7 +5183,7 @@
 					   vcpu->arch.eff_db);
 
 		if (dr6 != 0) {
-			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
 			kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
 				get_segment_base(vcpu, VCPU_SREG_CS);
 
@@ -5144,14 +5194,15 @@
 		}
 	}
 
-	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) {
+	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
+	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
 		dr6 = kvm_vcpu_check_hw_bp(eip, 0,
 					   vcpu->arch.dr7,
 					   vcpu->arch.db);
 
 		if (dr6 != 0) {
 			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= dr6;
+			vcpu->arch.dr6 |= dr6 | DR6_RTM;
 			kvm_queue_exception(vcpu, DB_VECTOR);
 			*r = EMULATE_DONE;
 			return true;
@@ -5215,6 +5266,8 @@
 
 	if (emulation_type & EMULTYPE_SKIP) {
 		kvm_rip_write(vcpu, ctxt->_eip);
+		if (ctxt->eflags & X86_EFLAGS_RF)
+			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
 		return EMULATE_DONE;
 	}
 
@@ -5265,13 +5318,22 @@
 		r = EMULATE_DONE;
 
 	if (writeback) {
+		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r == EMULATE_DONE)
-			kvm_vcpu_check_singlestep(vcpu, &r);
-		kvm_set_rflags(vcpu, ctxt->eflags);
+			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+		__kvm_set_rflags(vcpu, ctxt->eflags);
+
+		/*
+		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
+		 * do nothing, and it will be requested again as soon as
+		 * the shadow expires.  But we still need to check here,
+		 * because POPF has no interrupt shadow.
+		 */
+		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 	} else
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
 
@@ -5662,7 +5724,6 @@
 	u64 param, ingpa, outgpa, ret;
 	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
 	bool fast, longmode;
-	int cs_db, cs_l;
 
 	/*
 	 * hypercall generates UD from non-zero cpl and real mode
@@ -5673,8 +5734,7 @@
 		return 0;
 	}
 
-	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-	longmode = is_long_mode(vcpu) && cs_l == 1;
+	longmode = is_64_bit_mode(vcpu);
 
 	if (!longmode) {
 		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
@@ -5739,7 +5799,7 @@
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
-	int r = 1;
+	int op_64_bit, r = 1;
 
 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
 		return kvm_hv_hypercall(vcpu);
@@ -5752,7 +5812,8 @@
 
 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
-	if (!is_long_mode(vcpu)) {
+	op_64_bit = is_64_bit_mode(vcpu);
+	if (!op_64_bit) {
 		nr &= 0xFFFFFFFF;
 		a0 &= 0xFFFFFFFF;
 		a1 &= 0xFFFFFFFF;
@@ -5778,6 +5839,8 @@
 		break;
 	}
 out:
+	if (!op_64_bit)
+		ret = (u32)ret;
 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
 	++vcpu->stat.hypercalls;
 	return r;
@@ -5856,6 +5919,11 @@
 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
 					vcpu->arch.exception.has_error_code,
 					vcpu->arch.exception.error_code);
+
+		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
+			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
+					     X86_EFLAGS_RF);
+
 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
 					  vcpu->arch.exception.has_error_code,
 					  vcpu->arch.exception.error_code,
@@ -6847,9 +6915,11 @@
 	atomic_set(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = 0;
 	vcpu->arch.nmi_injected = false;
+	kvm_clear_interrupt_queue(vcpu);
+	kvm_clear_exception_queue(vcpu);
 
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
-	vcpu->arch.dr6 = DR6_FIXED_1;
+	vcpu->arch.dr6 = DR6_INIT;
 	kvm_update_dr6(vcpu);
 	vcpu->arch.dr7 = DR7_FIXED_1;
 	kvm_update_dr7(vcpu);
@@ -7405,12 +7475,17 @@
 }
 EXPORT_SYMBOL_GPL(kvm_get_rflags);
 
-void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
 		rflags |= X86_EFLAGS_TF;
 	kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+	__kvm_set_rflags(vcpu, rflags);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
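
In kvm_emulate_hypercall() the argument registers were already masked for
32-bit guests; the new op_64_bit path also truncates the return value, since
such a guest reads only EAX and must not see stale upper bits in RAX. A
compact userspace sketch of the truncation (the error value is a hypothetical
stand-in):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t hypercall(int op_64_bit, uint64_t nr)
	{
		uint64_t ret;

		if (!op_64_bit)
			nr &= 0xFFFFFFFF;	/* 32-bit view of the inputs */

		ret = (nr == 0) ? 0 : -1ULL;	/* made-up "unknown nr" error */
		if (!op_64_bit)
			ret = (uint32_t)ret;	/* the guest reads only EAX */
		return ret;
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)hypercall(0, 6)); /* 0xffffffff */
		printf("%#llx\n", (unsigned long long)hypercall(1, 6)); /* all ones */
		return 0;
	}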
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 8c97bac..306a1b7 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -47,6 +47,16 @@
 #endif
 }
 
+static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
+{
+	int cs_db, cs_l;
+
+	if (!is_long_mode(vcpu))
+		return false;
+	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+	return cs_l;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
@@ -108,6 +118,23 @@
 	return false;
 }
 
+static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
+					       enum kvm_reg reg)
+{
+	unsigned long val = kvm_register_read(vcpu, reg);
+
+	return is_64_bit_mode(vcpu) ? val : (u32)val;
+}
+
+static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
+				       enum kvm_reg reg,
+				       unsigned long val)
+{
+	if (!is_64_bit_mode(vcpu))
+		val = (u32)val;
+	return kvm_register_write(vcpu, reg, val);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
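
The new is_64_bit_mode() and the kvm_register_readl()/kvm_register_writel()
pair let the CR/DR and VMX exit handlers above honor the guest's operand
size: outside 64-bit mode only the low 32 bits of a general-purpose register
are architecturally visible. A userspace model of the read helper
(illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* Like kvm_register_readl(): truncate unless in 64-bit mode. */
	static uint64_t register_readl(int is_64_bit, uint64_t raw)
	{
		return is_64_bit ? raw : (uint32_t)raw;
	}

	int main(void)
	{
		uint64_t raw = 0xffffffff00000004ULL;	/* stale upper half */

		printf("%#llx %#llx\n",
		       (unsigned long long)register_readl(1, raw),
		       (unsigned long long)register_readl(0, raw)); /* ...04 0x4 */
		return 0;
	}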
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index b5e6026..c61ea57 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -326,6 +326,27 @@
 	struct pci_bus *bus;
 	u16 config;
 
+	if (!vga_default_device()) {
+		resource_size_t start, end;
+		int i;
+
+		/* Does the firmware framebuffer belong to us? */
+		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+			if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+				continue;
+
+			start = pci_resource_start(pdev, i);
+			end  = pci_resource_end(pdev, i);
+
+			if (!start || !end)
+				continue;
+
+			if (screen_info.lfb_base >= start &&
+			    (screen_info.lfb_base + screen_info.lfb_size) < end)
+				vga_set_default_device(pdev);
+		}
+	}
+
 	/* Is VGA routed to us? */
 	bus = pdev->bus;
 	while (bus) {
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index a19ed92..2ae525e 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -162,6 +162,10 @@
 			return start;
 		if (start & 0x300)
 			start = (start + 0x3ff) & ~0x3ff;
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* The low 1MB range is reserved for ISA cards */
+		if (start < BIOS_END)
+			start = BIOS_END;
 	}
 	return start;
 }
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 424f4c9..6ec7910 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -165,7 +165,7 @@
  *		by __save_processor_state()
 *	@ctxt - structure to load the registers' contents from
  */
-static void __restore_processor_state(struct saved_context *ctxt)
+static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
 	if (ctxt->misc_enable_saved)
 		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
@@ -239,7 +239,7 @@
 }
 
 /* Needed by apm.c */
-void restore_processor_state(void)
+void notrace restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
 }
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 04f82e0..2a206d2 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -25,7 +25,8 @@
 	__asm__ __volatile__("rep;nop": : :"memory");
 }
 
-#define cpu_relax()	rep_nop()
+#define cpu_relax()		rep_nop()
+#define cpu_relax_lowlatency()	cpu_relax()
 
 #include <asm/processor-generic.h>
 
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index c985835..ebfa9b2 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -36,99 +36,133 @@
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <xen/xen.h>
 
 #include <asm/pgtable.h>
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-		      unsigned long addr, void *data)
-{
-	unsigned long **frames = (unsigned long **)data;
-
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
-
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
-			     unsigned long addr, void *data)
-{
-	uint64_t **frames = (uint64_t **)data;
-
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
-
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-			unsigned long addr, void *data)
-{
-
-	set_pte_at(&init_mm, addr, pte, __pte(0));
-	return 0;
-}
+static struct gnttab_vm_area {
+	struct vm_struct *area;
+	pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
 
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   void **__shared)
 {
-	int rc;
 	void *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
 
-	if (shared == NULL) {
-		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-		BUG_ON(area == NULL);
-		shared = area->addr;
-		*__shared = shared;
+	if (shared == NULL)
+		*__shared = shared = gnttab_shared_vm_area.area->addr;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
 	}
 
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn, &frames);
-	return rc;
+	return 0;
 }
 
 int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   grant_status_t **__shared)
 {
-	int rc;
 	grant_status_t *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
 
-	if (shared == NULL) {
-		/* No need to pass in PTE as we are going to do it
-		 * in apply_to_page_range anyhow. */
-		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-		BUG_ON(area == NULL);
-		shared = area->addr;
-		*__shared = shared;
+	if (shared == NULL)
+		*__shared = shared = gnttab_status_vm_area.area->addr;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
 	}
 
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn_status, &frames);
-	return rc;
+	return 0;
 }
 
 void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
-	apply_to_page_range(&init_mm, (unsigned long)shared,
-			    PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+	pte_t **ptes;
+	unsigned long addr;
+	unsigned long i;
+
+	if (shared == gnttab_status_vm_area.area->addr)
+		ptes = gnttab_status_vm_area.ptes;
+	else
+		ptes = gnttab_shared_vm_area.ptes;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+		addr += PAGE_SIZE;
+	}
 }
+
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
+{
+	area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+	if (area->ptes == NULL)
+		return -ENOMEM;
+
+	area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+	if (area->area == NULL) {
+		kfree(area->ptes);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+	free_vm_area(area->area);
+	kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	int ret;
+
+	if (!xen_pv_domain())
+		return 0;
+
+	ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Always allocate the space for the status frames in case
+	 * we're migrated to a host with V2 support.
+	 */
+	ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+  err:
+	arch_gnttab_vfree(&gnttab_shared_vm_area);
+	return -ENOMEM;
+}
+
 #ifdef CONFIG_XEN_PVH
 #include <xen/balloon.h>
 #include <xen/events.h>
-#include <xen/xen.h>
 #include <linux/slab.h>
 static int __init xlated_setup_gnttab_pages(void)
 {
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index abb5970..b61bdf0 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -182,6 +182,7 @@
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])
 
 #define cpu_relax()  barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Special register access. */
 
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 28d227c..e17da94 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -928,7 +928,15 @@
 	.css_offline = blkcg_css_offline,
 	.css_free = blkcg_css_free,
 	.can_attach = blkcg_can_attach,
-	.base_cftypes = blkcg_files,
+	.legacy_cftypes = blkcg_files,
+#ifdef CONFIG_MEMCG
+	/*
+	 * This ensures that, if available, memcg is automatically enabled
+	 * together on the default hierarchy so that the owner cgroup can
+	 * be retrieved from writeback pages.
+	 */
+	.depends_on = 1 << memory_cgrp_id,
+#endif
 };
 EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 
@@ -1120,7 +1128,8 @@
 
 	/* everything is in place, add intf files for the new policy */
 	if (pol->cftypes)
-		WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
+		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
+						  pol->cftypes));
 	ret = 0;
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3fdb21a..9273d09 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -412,13 +412,13 @@
 	int rw;
 
 	/*
-	 * If sane_hierarchy is enabled, we switch to properly hierarchical
+	 * If on the default hierarchy, we switch to properly hierarchical
 	 * behavior where limits on a given throtl_grp are applied to the
 	 * whole subtree rather than just the group itself.  e.g. If 16M
 	 * read_bps limit is set on the root group, the whole system can't
 	 * exceed 16M for the device.
 	 *
-	 * If sane_hierarchy is not enabled, the broken flat hierarchy
+	 * If not on the default hierarchy, the broken flat hierarchy
 	 * behavior is retained where all throtl_grps are treated as if
 	 * they're all separate root groups right below throtl_data.
 	 * Limits of a group don't interact with limits of other groups
@@ -426,7 +426,7 @@
 	 */
 	parent_sq = &td->service_queue;
 
-	if (cgroup_sane_behavior(blkg->blkcg->css.cgroup) && blkg->parent)
+	if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
 		parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 
 	throtl_service_queue_init(&tg->service_queue, parent_sq);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ce4012a..6345c47 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,7 +23,8 @@
 
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
-	depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
+	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+	depends on MODULE_SIG
 	help
 	  This option enables the fips boot option, which is
 	  required if you want the system to operate in a FIPS 200
@@ -1019,6 +1020,19 @@
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
 	  optimized using SPARC64 crypto opcodes.
 
+config CRYPTO_DES3_EDE_X86_64
+	tristate "Triple DES EDE cipher algorithm (x86-64)"
+	depends on X86 && 64BIT
+	select CRYPTO_ALGAPI
+	select CRYPTO_DES
+	help
+	  Triple DES EDE (FIPS 46-3) algorithm.
+
+	  This module provides an implementation of the Triple DES EDE cipher
+	  algorithm, optimized for x86-64 processors. Two versions of the
+	  algorithm are provided: a regular one that processes one input
+	  block at a time, and one that processes three blocks in parallel.
+
 config CRYPTO_FCRYPT
 	tristate "FCrypt cipher algorithm"
 	select CRYPTO_ALGAPI
@@ -1380,6 +1394,40 @@
 	  ANSI X9.31 A.2.4. Note that this option must be enabled if
 	  CRYPTO_FIPS is selected
 
+menuconfig CRYPTO_DRBG_MENU
+	tristate "NIST SP800-90A DRBG"
+	help
+	  NIST SP800-90A compliant DRBG. In the following submenu, one or
+	  more of the DRBG types must be selected.
+
+if CRYPTO_DRBG_MENU
+
+config CRYPTO_DRBG_HMAC
+	bool "Enable HMAC DRBG"
+	default y
+	select CRYPTO_HMAC
+	help
+	  Enable the HMAC DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG_HASH
+	bool "Enable Hash DRBG"
+	select CRYPTO_HASH
+	help
+	  Enable the Hash DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG_CTR
+	bool "Enable CTR DRBG"
+	select CRYPTO_AES
+	help
+	  Enable the CTR DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG
+	tristate
+	default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR)
+	select CRYPTO_RNG
+
+endif	# if CRYPTO_DRBG_MENU
+
 config CRYPTO_USER_API
 	tristate
 
diff --git a/crypto/Makefile b/crypto/Makefile
index 38e64231..cfa57b3 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -92,6 +92,7 @@
 obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_RNG2) += krng.o
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
+obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 966f893..6a3ad80 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/rwsem.h>
+#include <linux/security.h>
 
 struct alg_type_list {
 	const struct af_alg_type *type;
@@ -243,6 +244,7 @@
 
 	sock_init_data(newsock, sk2);
 	sock_graft(sk2, newsock);
+	security_sk_clone(sk, sk2);
 
 	err = type->accept(ask->private, sk2);
 	if (err) {
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 7a1ae87..e8d3a7d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -41,8 +41,20 @@
 	return 0;
 }
 
+static inline void crypto_check_module_sig(struct module *mod)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+	if (fips_enabled && mod && !mod->sig_ok)
+		panic("Module %s signature verification failed in FIPS mode\n",
+		      mod->name);
+#endif
+	return;
+}
+
 static int crypto_check_alg(struct crypto_alg *alg)
 {
+	crypto_check_module_sig(alg->cra_module);
+
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
 		return -EINVAL;
 
@@ -430,6 +442,8 @@
 
 	down_write(&crypto_alg_sem);
 
+	crypto_check_module_sig(tmpl->module);
+
 	list_for_each_entry(q, &crypto_template_list, list) {
 		if (q == tmpl)
 			goto out;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 7bdd61b..e592c90 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -233,7 +233,7 @@
 }
 
 static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
-				    crypto_completion_t complete)
+				    crypto_completion_t compl)
 {
 	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
@@ -241,7 +241,7 @@
 
 	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
 	rctx->complete = req->base.complete;
-	req->base.complete = complete;
+	req->base.complete = compl;
 
 	return cryptd_enqueue_request(queue, &req->base);
 }
@@ -414,7 +414,7 @@
 }
 
 static int cryptd_hash_enqueue(struct ahash_request *req,
-				crypto_completion_t complete)
+				crypto_completion_t compl)
 {
 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -422,7 +422,7 @@
 		cryptd_get_queue(crypto_ahash_tfm(tfm));
 
 	rctx->complete = req->base.complete;
-	req->base.complete = complete;
+	req->base.complete = compl;
 
 	return cryptd_enqueue_request(queue, &req->base);
 }
@@ -667,14 +667,14 @@
 }
 
 static int cryptd_aead_enqueue(struct aead_request *req,
-				    crypto_completion_t complete)
+				    crypto_completion_t compl)
 {
 	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
 
 	rctx->complete = req->base.complete;
-	req->base.complete = complete;
+	req->base.complete = compl;
 	return cryptd_enqueue_request(queue, &req->base);
 }
 
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index f6cf63f..298d464 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -859,13 +859,10 @@
  *   property.
  *
  */
-static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen)
+int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+		      unsigned int keylen)
 {
 	const u32 *K = (const u32 *)key;
-	struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
-	u32 *expkey = dctx->expkey;
-	u32 *flags = &tfm->crt_flags;
 
 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
@@ -880,6 +877,17 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__des3_ede_setkey);
+
+static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+	u32 *expkey = dctx->expkey;
+
+	return __des3_ede_setkey(expkey, flags, key, keylen);
+}
 
 static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -945,6 +953,8 @@
 
 static struct crypto_alg des_algs[2] = { {
 	.cra_name		=	"des",
+	.cra_driver_name	=	"des-generic",
+	.cra_priority		=	100,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct des_ctx),
@@ -958,6 +968,8 @@
 	.cia_decrypt		=	des_decrypt } }
 }, {
 	.cra_name		=	"des3_ede",
+	.cra_driver_name	=	"des3_ede-generic",
+	.cra_priority		=	100,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	DES3_EDE_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct des3_ede_ctx),
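
__des3_ede_setkey() keeps, and now exports, the degenerate-key check: when
K1 == K2 or K2 == K3, encrypt-decrypt-encrypt collapses to single DES, so
such keys are rejected unless weak keys were explicitly requested. A
userspace sketch of the XOR comparison over the six 32-bit words of the
192-bit key:

	#include <stdint.h>
	#include <stdio.h>

	/* Nonzero when the 3DES key degenerates to single DES. */
	static int des3_key_is_degenerate(const uint32_t K[6])
	{
		return !((K[0] ^ K[2]) | (K[1] ^ K[3])) ||	/* K1 == K2 */
		       !((K[2] ^ K[4]) | (K[3] ^ K[5]));	/* K2 == K3 */
	}

	int main(void)
	{
		uint32_t bad[6]  = { 1, 2, 1, 2, 5, 6 };	/* K1 == K2 */
		uint32_t good[6] = { 1, 2, 3, 4, 5, 6 };

		printf("%d %d\n", des3_key_is_degenerate(bad),
				  des3_key_is_degenerate(good));	/* 1 0 */
		return 0;
	}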
diff --git a/crypto/drbg.c b/crypto/drbg.c
new file mode 100644
index 0000000..7894db9
--- /dev/null
+++ b/crypto/drbg.c
@@ -0,0 +1,2044 @@
+/*
+ * DRBG: Deterministic Random Bits Generator
+ *       Based on NIST Recommended DRBG from NIST SP800-90A with the following
+ *       properties:
+ *		* CTR DRBG with DF with AES-128, AES-192, AES-256 cores
+ *		* Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
+ *		* HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
+ *		* with and without prediction resistance
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * DRBG Usage
+ * ==========
+ * The SP 800-90A DRBG allows the user to specify a personalization string
+ * for initialization as well as an additional information string for each
+ * random number request. The following code fragments show how a caller
+ * uses the kernel crypto API to exercise the full functionality of the DRBG.
+ *
+ * Usage without any additional data
+ * ---------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ *
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * err = crypto_rng_get_bytes(drng, &data, DATALEN);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with personalization string during initialization
+ * -------------------------------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * struct drbg_string pers;
+ * char personalization[11] = "some-string";
+ *
+ * drbg_string_fill(&pers, personalization, strlen(personalization));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // The reset completely re-initializes the DRBG with the provided
+ * // personalization string
+ * err = crypto_rng_reset(drng, &personalization, strlen(personalization));
+ * err = crypto_rng_get_bytes(drng, &data, DATALEN);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with additional information string during random number request
+ * ---------------------------------------------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * char addtl_string[11] = "some-string";
+ * struct drbg_string addtl;
+ *
+ * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // The following call is a wrapper to crypto_rng_get_bytes() and returns
+ * // the same error codes.
+ * err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with personalization and additional information strings
+ * -------------------------------------------------------------
+ * Just mix both scenarios above.
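+ *
+ * A minimal sketch combining the two scenarios above (illustrative only;
+ * DATALEN, drng_name and the string contents are assumptions):
+ *
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * struct drbg_string addtl;
+ * char personalization[11] = "some-string";
+ * char addtl_string[11] = "some-string";
+ *
+ * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // re-initialize the DRBG with the personalization string
+ * err = crypto_rng_reset(drng, &personalization, strlen(personalization));
+ * // pull random data while mixing in the additional information string
+ * err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
+ * crypto_free_rng(drng);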
+ */
+
+#include <crypto/drbg.h>
+
+/***************************************************************
+ * Backend cipher definitions available to DRBG
+ ***************************************************************/
+
+/*
+ * The order of the DRBG definitions here matters: every DRBG is registered
+ * as stdrng. Each DRBG receives an increasing cra_priority value the later
+ * it is defined in this array (see drbg_fill_array).
+ *
+ * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
+ * SHA256 / AES 256 are favored over the other ciphers. Thus, the favored
+ * DRBGs are the last entries in this array.
+ */
+static const struct drbg_core drbg_cores[] = {
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+	{
+		.flags = DRBG_CTR | DRBG_STRENGTH128,
+		.statelen = 32, /* 256 bits as defined in 10.2.1 */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 16,
+		.cra_name = "ctr_aes128",
+		.backend_cra_name = "ecb(aes)",
+	}, {
+		.flags = DRBG_CTR | DRBG_STRENGTH192,
+		.statelen = 40, /* 320 bits as defined in 10.2.1 */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 16,
+		.cra_name = "ctr_aes192",
+		.backend_cra_name = "ecb(aes)",
+	}, {
+		.flags = DRBG_CTR | DRBG_STRENGTH256,
+		.statelen = 48, /* 384 bits as defined in 10.2.1 */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 16,
+		.cra_name = "ctr_aes256",
+		.backend_cra_name = "ecb(aes)",
+	},
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+	{
+		.flags = DRBG_HASH | DRBG_STRENGTH128,
+		.statelen = 55, /* 440 bits */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 20,
+		.cra_name = "sha1",
+		.backend_cra_name = "sha1",
+	}, {
+		.flags = DRBG_HASH | DRBG_STRENGTH256,
+		.statelen = 111, /* 888 bits */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 48,
+		.cra_name = "sha384",
+		.backend_cra_name = "sha384",
+	}, {
+		.flags = DRBG_HASH | DRBG_STRENGTH256,
+		.statelen = 111, /* 888 bits */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 64,
+		.cra_name = "sha512",
+		.backend_cra_name = "sha512",
+	}, {
+		.flags = DRBG_HASH | DRBG_STRENGTH256,
+		.statelen = 55, /* 440 bits */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 32,
+		.cra_name = "sha256",
+		.backend_cra_name = "sha256",
+	},
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+	{
+		.flags = DRBG_HMAC | DRBG_STRENGTH128,
+		.statelen = 20, /* block length of cipher */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 20,
+		.cra_name = "hmac_sha1",
+		.backend_cra_name = "hmac(sha1)",
+	}, {
+		.flags = DRBG_HMAC | DRBG_STRENGTH256,
+		.statelen = 48, /* block length of cipher */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 48,
+		.cra_name = "hmac_sha384",
+		.backend_cra_name = "hmac(sha384)",
+	}, {
+		.flags = DRBG_HMAC | DRBG_STRENGTH256,
+		.statelen = 64, /* block length of cipher */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 64,
+		.cra_name = "hmac_sha512",
+		.backend_cra_name = "hmac(sha512)",
+	}, {
+		.flags = DRBG_HMAC | DRBG_STRENGTH256,
+		.statelen = 32, /* block length of cipher */
+		.max_addtllen = 35,
+		.max_bits = 19,
+		.max_req = 48,
+		.blocklen_bytes = 32,
+		.cra_name = "hmac_sha256",
+		.backend_cra_name = "hmac(sha256)",
+	},
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+};
+
+/******************************************************************
+ * Generic helper functions
+ ******************************************************************/
+
+/*
+ * Return strength of DRBG according to SP800-90A section 8.4
+ *
+ * @flags DRBG flags reference
+ *
+ * Return: normalized strength in *bytes*, or 32 as a default
+ *	   to counter programming errors
+ */
+static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
+{
+	switch (flags & DRBG_STRENGTH_MASK) {
+	case DRBG_STRENGTH128:
+		return 16;
+	case DRBG_STRENGTH192:
+		return 24;
+	case DRBG_STRENGTH256:
+		return 32;
+	default:
+		return 32;
+	}
+}
+
+/*
+ * FIPS 140-2 continuous self test
+ * The test is performed on the result of one round of the output
+ * function. Thus, the function implicitly knows the size of the
+ * buffer.
+ *
+ * The FIPS test can be called in an endless loop until it returns
+ * true. Although the code looks like it could deadlock, this cannot
+ * happen, because returning false cannot mathematically occur (except
+ * once when a reseed took place and the updated state is now set up
+ * such that the generation of a new value returns one identical to the
+ * previous one -- this is most unlikely and would happen only once).
+ * Thus, if this function repeatedly returned false and thus caused
+ * a deadlock, the integrity of the entire kernel would be lost.
+ *
+ * @drbg DRBG handle
+ * @buf output buffer of random data to be checked
+ *
+ * return:
+ *	true on success
+ *	false on error
+ */
+static bool drbg_fips_continuous_test(struct drbg_state *drbg,
+				      const unsigned char *buf)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+	int ret = 0;
+	/* skip test if we test the overall system */
+	if (drbg->test_data)
+		return true;
+	/* only perform test in FIPS mode */
+	if (0 == fips_enabled)
+		return true;
+	if (!drbg->fips_primed) {
+		/* Priming of FIPS test */
+		memcpy(drbg->prev, buf, drbg_blocklen(drbg));
+		drbg->fips_primed = true;
+		/* return false due to priming, i.e. another round is needed */
+		return false;
+	}
+	ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+	memcpy(drbg->prev, buf, drbg_blocklen(drbg));
+	/* the test shall pass when the two compared values are not equal */
+	return ret != 0;
+#else
+	return true;
+#endif /* CONFIG_CRYPTO_FIPS */
+}
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @buf buffer holding the converted integer
+ * @val value to be converted
+ * @buflen length of buffer
+ */
+#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
+static inline void drbg_int2byte(unsigned char *buf, uint64_t val,
+				 size_t buflen)
+{
+	unsigned char *byte;
+	uint64_t i;
+
+	byte = buf + (buflen - 1);
+	for (i = 0; i < buflen; i++)
+		*(byte--) = val >> (i * 8) & 0xff;
+}
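+
+/*
+ * Example (illustrative): drbg_int2byte(buf, 0x0102, 4) stores the
+ * big-endian sequence 0x00 0x00 0x01 0x02 in buf.
+ */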
+
+/*
+ * Increment buffer
+ *
+ * @dst buffer to increment
+ * @add value to add
+ */
+static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
+				const unsigned char *add, size_t addlen)
+{
+	/* implied: dstlen > addlen */
+	unsigned char *dstptr;
+	const unsigned char *addptr;
+	unsigned int remainder = 0;
+	size_t len = addlen;
+
+	dstptr = dst + (dstlen-1);
+	addptr = add + (addlen-1);
+	while (len) {
+		remainder += *dstptr + *addptr;
+		*dstptr = remainder & 0xff;
+		remainder >>= 8;
+		len--; dstptr--; addptr--;
+	}
+	len = dstlen - addlen;
+	while (len && remainder > 0) {
+		remainder = *dstptr + 1;
+		*dstptr = remainder & 0xff;
+		remainder >>= 8;
+		len--; dstptr--;
+	}
+}
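+
+/*
+ * Example (illustrative): with dst = { 0x00, 0xff }, dstlen = 2 and a
+ * one byte add = { 0x01 }, the carry propagates into the next byte and
+ * drbg_add_buf() leaves dst = { 0x01, 0x00 }.
+ */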
+#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
+
+/******************************************************************
+ * CTR DRBG callback functions
+ ******************************************************************/
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+#define CRYPTO_DRBG_CTR_STRING "CTR "
+static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
+			  unsigned char *outval, const struct drbg_string *in);
+static int drbg_init_sym_kernel(struct drbg_state *drbg);
+static int drbg_fini_sym_kernel(struct drbg_state *drbg);
+
+/* BCC function for CTR DRBG as defined in 10.4.3 */
+static int drbg_ctr_bcc(struct drbg_state *drbg,
+			unsigned char *out, const unsigned char *key,
+			struct list_head *in)
+{
+	int ret = 0;
+	struct drbg_string *curr = NULL;
+	struct drbg_string data;
+	short cnt = 0;
+
+	drbg_string_fill(&data, out, drbg_blocklen(drbg));
+
+	/* 10.4.3 step 1 */
+	memset(out, 0, drbg_blocklen(drbg));
+
+	/* 10.4.3 step 2 / 4 */
+	list_for_each_entry(curr, in, list) {
+		const unsigned char *pos = curr->buf;
+		size_t len = curr->len;
+		/* 10.4.3 step 4.1 */
+		while (len) {
+			/* 10.4.3 step 4.2 */
+			if (drbg_blocklen(drbg) == cnt) {
+				cnt = 0;
+				ret = drbg_kcapi_sym(drbg, key, out, &data);
+				if (ret)
+					return ret;
+			}
+			out[cnt] ^= *pos;
+			pos++;
+			cnt++;
+			len--;
+		}
+	}
+	/* 10.4.3 step 4.2 for last block */
+	if (cnt)
+		ret = drbg_kcapi_sym(drbg, key, out, &data);
+
+	return ret;
+}
+
+/*
+ * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
+ * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
+ * the scratchpad is used as follows:
+ * drbg_ctr_update:
+ *	temp
+ *		start: drbg->scratchpad
+ *		length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *			note: the cipher writing into this variable works
+ *			blocklen-wise. When the statelen is not a multiple
+ *			of blocklen, the generation loop below "spills over"
+ *			by at most blocklen. Thus, we need to provide
+ *			sufficient memory.
+ *	df_data
+ *		start: drbg->scratchpad +
+ *				drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *		length: drbg_statelen(drbg)
+ *
+ * drbg_ctr_df:
+ *	pad
+ *		start: df_data + drbg_statelen(drbg)
+ *		length: drbg_blocklen(drbg)
+ *	iv
+ *		start: pad + drbg_blocklen(drbg)
+ *		length: drbg_blocklen(drbg)
+ *	temp
+ *		start: iv + drbg_blocklen(drbg)
+ *		length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *			note: temp is the buffer that the BCC function operates
+ *			on. BCC operates blockwise. drbg_statelen(drbg)
+ *			is sufficient when the DRBG state length is a multiple
+ *			of the block size. For AES192 (and maybe other ciphers)
+ *			this is not correct and the length for temp is
+ *			insufficient (yes, that also means for such ciphers,
+ *			the final output of all BCC rounds are truncated).
+ *			Therefore, add drbg_blocklen(drbg) to cover all
+ *			possibilities.
+ */
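+
+/*
+ * Illustrative memory map of the CTR DRBG scratchpad derived from the
+ * description above (matches the allocation size in drbg_alloc_state):
+ *
+ *	drbg->scratchpad
+ *	+-- temp    (statelen + blocklen)
+ *	+-- df_data (statelen)
+ *	+-- pad     (blocklen)
+ *	+-- iv      (blocklen)
+ *	+-- temp    (statelen + blocklen, used by the BCC function)
+ */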
+
+/* Derivation Function for CTR DRBG as defined in 10.4.2 */
+static int drbg_ctr_df(struct drbg_state *drbg,
+		       unsigned char *df_data, size_t bytes_to_return,
+		       struct list_head *seedlist)
+{
+	int ret = -EFAULT;
+	unsigned char L_N[8];
+	/* S3 is input */
+	struct drbg_string S1, S2, S4, cipherin;
+	LIST_HEAD(bcc_list);
+	unsigned char *pad = df_data + drbg_statelen(drbg);
+	unsigned char *iv = pad + drbg_blocklen(drbg);
+	unsigned char *temp = iv + drbg_blocklen(drbg);
+	size_t padlen = 0;
+	unsigned int templen = 0;
+	/* 10.4.2 step 7 */
+	unsigned int i = 0;
+	/* 10.4.2 step 8 */
+	const unsigned char *K = (unsigned char *)
+			   "\x00\x01\x02\x03\x04\x05\x06\x07"
+			   "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			   "\x10\x11\x12\x13\x14\x15\x16\x17"
+			   "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
+	unsigned char *X;
+	size_t generated_len = 0;
+	size_t inputlen = 0;
+	struct drbg_string *seed = NULL;
+
+	memset(pad, 0, drbg_blocklen(drbg));
+	memset(iv, 0, drbg_blocklen(drbg));
+	memset(temp, 0, drbg_statelen(drbg));
+
+	/* 10.4.2 step 1 is implicit as we work byte-wise */
+
+	/* 10.4.2 step 2 */
+	if ((512/8) < bytes_to_return)
+		return -EINVAL;
+
+	/* 10.4.2 step 2 -- calculate the entire length of all input data */
+	list_for_each_entry(seed, seedlist, list)
+		inputlen += seed->len;
+	drbg_int2byte(&L_N[0], inputlen, 4);
+
+	/* 10.4.2 step 3 */
+	drbg_int2byte(&L_N[4], bytes_to_return, 4);
+
+	/* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
+	padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
+	/* wrap the padlen appropriately */
+	if (padlen)
+		padlen = drbg_blocklen(drbg) - padlen;
+	/*
+	 * pad / padlen contains the 0x80 byte and the following zero bytes.
+	 * As the calculated padlen value only covers the number of zero
+	 * bytes, this value has to be incremented by one for the 0x80 byte.
+	 */
+	padlen++;
+	pad[0] = 0x80;
+
+	/* 10.4.2 step 4 -- first fill the linked list and then order it */
+	drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
+	list_add_tail(&S1.list, &bcc_list);
+	drbg_string_fill(&S2, L_N, sizeof(L_N));
+	list_add_tail(&S2.list, &bcc_list);
+	list_splice_tail(seedlist, &bcc_list);
+	drbg_string_fill(&S4, pad, padlen);
+	list_add_tail(&S4.list, &bcc_list);
+
+	/* 10.4.2 step 9 */
+	while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
+		/*
+		 * 10.4.2 step 9.1 - the padding is implicit as the buffer
+		 * holds zeros after allocation -- even the increment of i
+		 * is irrelevant as the increment remains within the length of i
+		 */
+		drbg_int2byte(iv, i, 4);
+		/* 10.4.2 step 9.2 -- BCC and concatenation with temp */
+		ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
+		if (ret)
+			goto out;
+		/* 10.4.2 step 9.3 */
+		i++;
+		templen += drbg_blocklen(drbg);
+	}
+
+	/* 10.4.2 step 11 */
+	X = temp + (drbg_keylen(drbg));
+	drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
+
+	/* 10.4.2 step 12: overwriting of outval is implemented in next step */
+
+	/* 10.4.2 step 13 */
+	while (generated_len < bytes_to_return) {
+		short blocklen = 0;
+		/*
+		 * 10.4.2 step 13.1: the truncation of the key length is
+		 * implicit as the key is only drbg_blocklen in size based on
+		 * the implementation of the cipher function callback
+		 */
+		ret = drbg_kcapi_sym(drbg, temp, X, &cipherin);
+		if (ret)
+			goto out;
+		blocklen = (drbg_blocklen(drbg) <
+				(bytes_to_return - generated_len)) ?
+			    drbg_blocklen(drbg) :
+				(bytes_to_return - generated_len);
+		/* 10.4.2 step 13.2 and 14 */
+		memcpy(df_data + generated_len, X, blocklen);
+		generated_len += blocklen;
+	}
+
+	ret = 0;
+
+out:
+	memset(iv, 0, drbg_blocklen(drbg));
+	memset(temp, 0, drbg_statelen(drbg));
+	memset(pad, 0, drbg_blocklen(drbg));
+	return ret;
+}
+
+/*
+ * update function of CTR DRBG as defined in 10.2.1.2
+ *
+ * The reseed variable has an enhanced meaning compared to the update
+ * functions of the other DRBGs as follows:
+ * 0 => initial seed from initialization
+ * 1 => reseed via drbg_seed
+ * 2 => first invocation from drbg_ctr_generate when addtl is present. In
+ *      this case, the df_data scratchpad is not deleted so that it is
+ *      available for the subsequent call to prevent invoking the DF
+ *      function again.
+ * 3 => second invocation from drbg_ctr_generate. When the update function
+ *      was called with addtl, the df_data memory already contains the
+ *      DFed addtl information and we do not need to call DF again.
+ */
+static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
+			   int reseed)
+{
+	int ret = -EFAULT;
+	/* 10.2.1.2 step 1 */
+	unsigned char *temp = drbg->scratchpad;
+	unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
+				 drbg_blocklen(drbg);
+	unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
+	unsigned int len = 0;
+	struct drbg_string cipherin;
+	unsigned char prefix = DRBG_PREFIX1;
+
+	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+	if (3 > reseed)
+		memset(df_data, 0, drbg_statelen(drbg));
+
+	/* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
+	if (seed) {
+		ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
+		if (ret)
+			goto out;
+	}
+
+	drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg));
+	/*
+	 * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation
+	 * zeroizes all memory during initialization
+	 */
+	while (len < (drbg_statelen(drbg))) {
+		/* 10.2.1.2 step 2.1 */
+		drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+		/* 10.2.1.2 step 2.2 */
+		ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
+		if (ret)
+			goto out;
+		/* 10.2.1.2 step 2.3 and 3 */
+		len += drbg_blocklen(drbg);
+	}
+
+	/* 10.2.1.2 step 4 */
+	temp_p = temp;
+	df_data_p = df_data;
+	for (len = 0; len < drbg_statelen(drbg); len++) {
+		*temp_p ^= *df_data_p;
+		df_data_p++; temp_p++;
+	}
+
+	/* 10.2.1.2 step 5 */
+	memcpy(drbg->C, temp, drbg_keylen(drbg));
+	/* 10.2.1.2 step 6 */
+	memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
+	ret = 0;
+
+out:
+	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+	if (2 != reseed)
+		memset(df_data, 0, drbg_statelen(drbg));
+	return ret;
+}
+
+/*
+ * scratchpad use: drbg_ctr_update is called independently from the
+ * generate operation (drbg_ctr_generate). Therefore, the scratchpad is
+ * reused.
+ */
+/* Generate function of CTR DRBG as defined in 10.2.1.5.2 */
+static int drbg_ctr_generate(struct drbg_state *drbg,
+			     unsigned char *buf, unsigned int buflen,
+			     struct list_head *addtl)
+{
+	int len = 0;
+	int ret = 0;
+	struct drbg_string data;
+	unsigned char prefix = DRBG_PREFIX1;
+
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+
+	/* 10.2.1.5.2 step 2 */
+	if (addtl && !list_empty(addtl)) {
+		ret = drbg_ctr_update(drbg, addtl, 2);
+		if (ret)
+			return ret;
+	}
+
+	/* 10.2.1.5.2 step 4.1 */
+	drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+	drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
+	while (len < buflen) {
+		int outlen = 0;
+		/* 10.2.1.5.2 step 4.2 */
+		ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data);
+		if (ret) {
+			len = ret;
+			goto out;
+		}
+		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+			  drbg_blocklen(drbg) : (buflen - len);
+		if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
+			/* 10.2.1.5.2 step 6 */
+			drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+			continue;
+		}
+		/* 10.2.1.5.2 step 4.3 */
+		memcpy(buf + len, drbg->scratchpad, outlen);
+		len += outlen;
+		/* 10.2.1.5.2 step 6 */
+		if (len < buflen)
+			drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+	}
+
+	/* 10.2.1.5.2 step 6 */
+	ret = drbg_ctr_update(drbg, NULL, 3);
+	if (ret)
+		len = ret;
+
+out:
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	return len;
+}
+
+static struct drbg_state_ops drbg_ctr_ops = {
+	.update		= drbg_ctr_update,
+	.generate	= drbg_ctr_generate,
+	.crypto_init	= drbg_init_sym_kernel,
+	.crypto_fini	= drbg_fini_sym_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+
+/******************************************************************
+ * HMAC DRBG callback functions
+ ******************************************************************/
+
+#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
+static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
+			   unsigned char *outval, const struct list_head *in);
+static int drbg_init_hash_kernel(struct drbg_state *drbg);
+static int drbg_fini_hash_kernel(struct drbg_state *drbg);
+#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
+
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+#define CRYPTO_DRBG_HMAC_STRING "HMAC "
+/* update function of HMAC DRBG as defined in 10.1.2.2 */
+static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
+			    int reseed)
+{
+	int ret = -EFAULT;
+	int i = 0;
+	struct drbg_string seed1, seed2, vdata;
+	LIST_HEAD(seedlist);
+	LIST_HEAD(vdatalist);
+
+	if (!reseed) {
+		/* 10.1.2.3 step 2 */
+		memset(drbg->C, 0, drbg_statelen(drbg));
+		memset(drbg->V, 1, drbg_statelen(drbg));
+	}
+
+	drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
+	list_add_tail(&seed1.list, &seedlist);
+	/* buffer of seed2 will be filled in the for loop below with one byte */
+	drbg_string_fill(&seed2, NULL, 1);
+	list_add_tail(&seed2.list, &seedlist);
+	/* input data of seed is allowed to be NULL at this point */
+	if (seed)
+		list_splice_tail(seed, &seedlist);
+
+	drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg));
+	list_add_tail(&vdata.list, &vdatalist);
+	for (i = 2; 0 < i; i--) {
+		/* first round uses 0x0, second 0x1 */
+		unsigned char prefix = DRBG_PREFIX0;
+		if (1 == i)
+			prefix = DRBG_PREFIX1;
+		/* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
+		seed2.buf = &prefix;
+		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->C, &seedlist);
+		if (ret)
+			return ret;
+
+		/* 10.1.2.2 step 2 and 5 -- HMAC for V */
+		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &vdatalist);
+		if (ret)
+			return ret;
+
+		/* 10.1.2.2 step 3 */
+		if (!seed)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* generate function of HMAC DRBG as defined in 10.1.2.5 */
+static int drbg_hmac_generate(struct drbg_state *drbg,
+			      unsigned char *buf,
+			      unsigned int buflen,
+			      struct list_head *addtl)
+{
+	int len = 0;
+	int ret = 0;
+	struct drbg_string data;
+	LIST_HEAD(datalist);
+
+	/* 10.1.2.5 step 2 */
+	if (addtl && !list_empty(addtl)) {
+		ret = drbg_hmac_update(drbg, addtl, 1);
+		if (ret)
+			return ret;
+	}
+
+	drbg_string_fill(&data, drbg->V, drbg_statelen(drbg));
+	list_add_tail(&data.list, &datalist);
+	while (len < buflen) {
+		unsigned int outlen = 0;
+		/* 10.1.2.5 step 4.1 */
+		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &datalist);
+		if (ret)
+			return ret;
+		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+			  drbg_blocklen(drbg) : (buflen - len);
+		if (!drbg_fips_continuous_test(drbg, drbg->V))
+			continue;
+
+		/* 10.1.2.5 step 4.2 */
+		memcpy(buf + len, drbg->V, outlen);
+		len += outlen;
+	}
+
+	/* 10.1.2.5 step 6 */
+	if (addtl && !list_empty(addtl))
+		ret = drbg_hmac_update(drbg, addtl, 1);
+	else
+		ret = drbg_hmac_update(drbg, NULL, 1);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static struct drbg_state_ops drbg_hmac_ops = {
+	.update		= drbg_hmac_update,
+	.generate	= drbg_hmac_generate,
+	.crypto_init	= drbg_init_hash_kernel,
+	.crypto_fini	= drbg_fini_hash_kernel,
+
+};
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+
+/******************************************************************
+ * Hash DRBG callback functions
+ ******************************************************************/
+
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+#define CRYPTO_DRBG_HASH_STRING "HASH "
+/*
+ * scratchpad usage: as drbg_hash_update and drbg_hash_df are used
+ * interlinked, the scratchpad is used as follows:
+ * drbg_hash_update
+ *	start: drbg->scratchpad
+ *	length: drbg_statelen(drbg)
+ * drbg_hash_df:
+ *	start: drbg->scratchpad + drbg_statelen(drbg)
+ *	length: drbg_blocklen(drbg)
+ *
+ * drbg_hash_process_addtl uses the scratchpad, but fully completes
+ * before either of the functions mentioned before are invoked. Therefore,
+ * drbg_hash_process_addtl does not need to be specifically considered.
+ */
+
+/* Derivation Function for Hash DRBG as defined in 10.4.1 */
+static int drbg_hash_df(struct drbg_state *drbg,
+			unsigned char *outval, size_t outlen,
+			struct list_head *entropylist)
+{
+	int ret = 0;
+	size_t len = 0;
+	unsigned char input[5];
+	unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
+	struct drbg_string data;
+
+	memset(tmp, 0, drbg_blocklen(drbg));
+
+	/* 10.4.1 step 3 */
+	input[0] = 1;
+	drbg_int2byte(&input[1], (outlen * 8), 4);
+
+	/* 10.4.1 step 4.1 -- concatenation of data for input into hash */
+	drbg_string_fill(&data, input, 5);
+	list_add(&data.list, entropylist);
+
+	/* 10.4.1 step 4 */
+	while (len < outlen) {
+		short blocklen = 0;
+		/* 10.4.1 step 4.1 */
+		ret = drbg_kcapi_hash(drbg, NULL, tmp, entropylist);
+		if (ret)
+			goto out;
+		/* 10.4.1 step 4.2 */
+		input[0]++;
+		blocklen = (drbg_blocklen(drbg) < (outlen - len)) ?
+			    drbg_blocklen(drbg) : (outlen - len);
+		memcpy(outval + len, tmp, blocklen);
+		len += blocklen;
+	}
+
+out:
+	memset(tmp, 0, drbg_blocklen(drbg));
+	return ret;
+}
+
+/* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */
+static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
+			    int reseed)
+{
+	int ret = 0;
+	struct drbg_string data1, data2;
+	LIST_HEAD(datalist);
+	LIST_HEAD(datalist2);
+	unsigned char *V = drbg->scratchpad;
+	unsigned char prefix = DRBG_PREFIX1;
+
+	memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+	if (!seed)
+		return -EINVAL;
+
+	if (reseed) {
+		/* 10.1.1.3 step 1 */
+		memcpy(V, drbg->V, drbg_statelen(drbg));
+		drbg_string_fill(&data1, &prefix, 1);
+		list_add_tail(&data1.list, &datalist);
+		drbg_string_fill(&data2, V, drbg_statelen(drbg));
+		list_add_tail(&data2.list, &datalist);
+	}
+	list_splice_tail(seed, &datalist);
+
+	/* 10.1.1.2 / 10.1.1.3 step 2 and 3 */
+	ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist);
+	if (ret)
+		goto out;
+
+	/* 10.1.1.2 / 10.1.1.3 step 4  */
+	prefix = DRBG_PREFIX0;
+	drbg_string_fill(&data1, &prefix, 1);
+	list_add_tail(&data1.list, &datalist2);
+	drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+	list_add_tail(&data2.list, &datalist2);
+	/* 10.1.1.2 / 10.1.1.3 step 4 */
+	ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
+
+out:
+	memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+	return ret;
+}
+
+/* processing of additional information string for Hash DRBG */
+static int drbg_hash_process_addtl(struct drbg_state *drbg,
+				   struct list_head *addtl)
+{
+	int ret = 0;
+	struct drbg_string data1, data2;
+	LIST_HEAD(datalist);
+	unsigned char prefix = DRBG_PREFIX2;
+
+	/* this is the value w as per the specification */
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+
+	/* 10.1.1.4 step 2 */
+	if (!addtl || list_empty(addtl))
+		return 0;
+
+	/* 10.1.1.4 step 2a */
+	drbg_string_fill(&data1, &prefix, 1);
+	drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+	list_add_tail(&data1.list, &datalist);
+	list_add_tail(&data2.list, &datalist);
+	list_splice_tail(addtl, &datalist);
+	ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+	if (ret)
+		goto out;
+
+	/* 10.1.1.4 step 2b */
+	drbg_add_buf(drbg->V, drbg_statelen(drbg),
+		     drbg->scratchpad, drbg_blocklen(drbg));
+
+out:
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	return ret;
+}
+
+/* Hashgen defined in 10.1.1.4 */
+static int drbg_hash_hashgen(struct drbg_state *drbg,
+			     unsigned char *buf,
+			     unsigned int buflen)
+{
+	int len = 0;
+	int ret = 0;
+	unsigned char *src = drbg->scratchpad;
+	unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
+	struct drbg_string data;
+	LIST_HEAD(datalist);
+	unsigned char prefix = DRBG_PREFIX1;
+
+	memset(src, 0, drbg_statelen(drbg));
+	memset(dst, 0, drbg_blocklen(drbg));
+
+	/* 10.1.1.4 step hashgen 2 */
+	memcpy(src, drbg->V, drbg_statelen(drbg));
+
+	drbg_string_fill(&data, src, drbg_statelen(drbg));
+	list_add_tail(&data.list, &datalist);
+	while (len < buflen) {
+		unsigned int outlen = 0;
+		/* 10.1.1.4 step hashgen 4.1 */
+		ret = drbg_kcapi_hash(drbg, NULL, dst, &datalist);
+		if (ret) {
+			len = ret;
+			goto out;
+		}
+		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+			  drbg_blocklen(drbg) : (buflen - len);
+		if (!drbg_fips_continuous_test(drbg, dst)) {
+			drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+			continue;
+		}
+		/* 10.1.1.4 step hashgen 4.2 */
+		memcpy(buf + len, dst, outlen);
+		len += outlen;
+		/* 10.1.1.4 hashgen step 4.3 */
+		if (len < buflen)
+			drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+	}
+
+out:
+	memset(drbg->scratchpad, 0,
+	       (drbg_statelen(drbg) + drbg_blocklen(drbg)));
+	return len;
+}
+
+/* generate function for Hash DRBG as defined in 10.1.1.4 */
+static int drbg_hash_generate(struct drbg_state *drbg,
+			      unsigned char *buf, unsigned int buflen,
+			      struct list_head *addtl)
+{
+	int len = 0;
+	int ret = 0;
+	unsigned char req[8];
+	unsigned char prefix = DRBG_PREFIX3;
+	struct drbg_string data1, data2;
+	LIST_HEAD(datalist);
+
+	/* 10.1.1.4 step 2 */
+	ret = drbg_hash_process_addtl(drbg, addtl);
+	if (ret)
+		return ret;
+	/* 10.1.1.4 step 3 */
+	len = drbg_hash_hashgen(drbg, buf, buflen);
+
+	/* this is the value H as documented in 10.1.1.4 */
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	/* 10.1.1.4 step 4 */
+	drbg_string_fill(&data1, &prefix, 1);
+	list_add_tail(&data1.list, &datalist);
+	drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+	list_add_tail(&data2.list, &datalist);
+	ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+	if (ret) {
+		len = ret;
+		goto out;
+	}
+
+	/* 10.1.1.4 step 5 */
+	drbg_add_buf(drbg->V, drbg_statelen(drbg),
+		     drbg->scratchpad, drbg_blocklen(drbg));
+	drbg_add_buf(drbg->V, drbg_statelen(drbg),
+		     drbg->C, drbg_statelen(drbg));
+	drbg_int2byte(req, drbg->reseed_ctr, sizeof(req));
+	drbg_add_buf(drbg->V, drbg_statelen(drbg), req, 8);
+
+out:
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	return len;
+}
+
+/*
+ * scratchpad usage: as update and generate are used isolated, both
+ * can use the scratchpad
+ */
+static struct drbg_state_ops drbg_hash_ops = {
+	.update		= drbg_hash_update,
+	.generate	= drbg_hash_generate,
+	.crypto_init	= drbg_init_hash_kernel,
+	.crypto_fini	= drbg_fini_hash_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+
+/******************************************************************
+ * Functions common for DRBG implementations
+ ******************************************************************/
+
+/*
+ * Seeding or reseeding of the DRBG
+ *
+ * @drbg: DRBG state struct
+ * @pers: personalization / additional information buffer
+ * @reseed: 0 for initial seed process, 1 for reseeding
+ *
+ * return:
+ *	0 on success
+ *	error value otherwise
+ */
+static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
+		     bool reseed)
+{
+	int ret = 0;
+	unsigned char *entropy = NULL;
+	size_t entropylen = 0;
+	struct drbg_string data1;
+	LIST_HEAD(seedlist);
+
+	/* 9.1 / 9.2 / 9.3.1 step 3 */
+	if (pers && pers->len > (drbg_max_addtl(drbg))) {
+		pr_devel("DRBG: personalization string too long %zu\n",
+			 pers->len);
+		return -EINVAL;
+	}
+
+	if (drbg->test_data && drbg->test_data->testentropy) {
+		drbg_string_fill(&data1, drbg->test_data->testentropy->buf,
+				 drbg->test_data->testentropy->len);
+		pr_devel("DRBG: using test entropy\n");
+	} else {
+		/*
+		 * Gather entropy equal to the security strength of the DRBG.
+		 * With a derivation function, a nonce is required in addition
+		 * to the entropy. A nonce must be at least 1/2 of the security
+		 * strength of the DRBG in size. Thus, entropy * nonce is 3/2
+		 * of the strength. The consideration of a nonce is only
+		 * applicable during initial seeding.
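+		 * Example: for AES-128 strength (16 bytes), initial seeding
+		 * gathers ((16 + 1) / 2) * 3 = 24 bytes, i.e. 16 bytes of
+		 * entropy plus an 8 byte nonce.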
+		 */
+		entropylen = drbg_sec_strength(drbg->core->flags);
+		if (!entropylen)
+			return -EFAULT;
+		if (!reseed)
+			entropylen = ((entropylen + 1) / 2) * 3;
+		pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n",
+			 entropylen);
+		entropy = kzalloc(entropylen, GFP_KERNEL);
+		if (!entropy)
+			return -ENOMEM;
+		get_random_bytes(entropy, entropylen);
+		drbg_string_fill(&data1, entropy, entropylen);
+	}
+	list_add_tail(&data1.list, &seedlist);
+
+	/*
+	 * concatenation of entropy with personalization string / addtl input;
+	 * the variable pers is handed in directly by the caller, so check
+	 * whether its contents are appropriate
+	 */
+	if (pers && pers->buf && 0 < pers->len) {
+		list_add_tail(&pers->list, &seedlist);
+		pr_devel("DRBG: using personalization string\n");
+	}
+
+	ret = drbg->d_ops->update(drbg, &seedlist, reseed);
+	if (ret)
+		goto out;
+
+	drbg->seeded = true;
+	/* 10.1.1.2 / 10.1.1.3 step 5 */
+	drbg->reseed_ctr = 1;
+
+out:
+	if (entropy)
+		kzfree(entropy);
+	return ret;
+}
+
+/* Free all substructures in a DRBG state without the DRBG state structure */
+static inline void drbg_dealloc_state(struct drbg_state *drbg)
+{
+	if (!drbg)
+		return;
+	if (drbg->V)
+		kzfree(drbg->V);
+	drbg->V = NULL;
+	if (drbg->C)
+		kzfree(drbg->C);
+	drbg->C = NULL;
+	if (drbg->scratchpad)
+		kzfree(drbg->scratchpad);
+	drbg->scratchpad = NULL;
+	drbg->reseed_ctr = 0;
+#ifdef CONFIG_CRYPTO_FIPS
+	if (drbg->prev)
+		kzfree(drbg->prev);
+	drbg->prev = NULL;
+	drbg->fips_primed = false;
+#endif
+}
+
+/*
+ * Allocate all sub-structures for a DRBG state.
+ * The DRBG state structure must already be allocated.
+ */
+static inline int drbg_alloc_state(struct drbg_state *drbg)
+{
+	int ret = -ENOMEM;
+	unsigned int sb_size = 0;
+
+	if (!drbg)
+		return -EINVAL;
+
+	drbg->V = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+	if (!drbg->V)
+		goto err;
+	drbg->C = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+	if (!drbg->C)
+		goto err;
+#ifdef CONFIG_CRYPTO_FIPS
+	drbg->prev = kzalloc(drbg_blocklen(drbg), GFP_KERNEL);
+	if (!drbg->prev)
+		goto err;
+	drbg->fips_primed = false;
+#endif
+	/* scratchpad is only generated for CTR and Hash */
+	if (drbg->core->flags & DRBG_HMAC)
+		sb_size = 0;
+	else if (drbg->core->flags & DRBG_CTR)
+		sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
+			  drbg_statelen(drbg) +	/* df_data */
+			  drbg_blocklen(drbg) +	/* pad */
+			  drbg_blocklen(drbg) +	/* iv */
+			  drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
+	else
+		sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
+
+	if (0 < sb_size) {
+		drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL);
+		if (!drbg->scratchpad)
+			goto err;
+	}
+	spin_lock_init(&drbg->drbg_lock);
+	return 0;
+
+err:
+	drbg_dealloc_state(drbg);
+	return ret;
+}
+
+/*
+ * Strategy to avoid holding long term locks: generate a shadow copy of DRBG
+ * and perform all operations on this shadow copy. After finishing, restore
+ * the updated state of the shadow copy into original drbg state. This way,
+ * only the read and write operations of the original drbg state must be
+ * locked
+ */
+static inline void drbg_copy_drbg(struct drbg_state *src,
+				  struct drbg_state *dst)
+{
+	if (!src || !dst)
+		return;
+	memcpy(dst->V, src->V, drbg_statelen(src));
+	memcpy(dst->C, src->C, drbg_statelen(src));
+	dst->reseed_ctr = src->reseed_ctr;
+	dst->seeded = src->seeded;
+	dst->pr = src->pr;
+#ifdef CONFIG_CRYPTO_FIPS
+	dst->fips_primed = src->fips_primed;
+	memcpy(dst->prev, src->prev, drbg_blocklen(src));
+#endif
+	/*
+	 * Not copied:
+	 * scratchpad is initialized in drbg_alloc_state;
+	 * priv_data is initialized with the call to crypto_init;
+	 * d_ops and core are set outside, as these parameters are const;
+	 * test_data is set outside to prevent it being copied back.
+	 */
+}
+
+static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow)
+{
+	int ret = -ENOMEM;
+	struct drbg_state *tmp = NULL;
+
+	if (!drbg || !drbg->core || !drbg->V || !drbg->C) {
+		pr_devel("DRBG: attempt to generate shadow copy for "
+			 "uninitialized DRBG state rejected\n");
+		return -EINVAL;
+	}
+	/* HMAC does not have a scratchpad */
+	if (!(drbg->core->flags & DRBG_HMAC) && NULL == drbg->scratchpad)
+		return -EINVAL;
+
+	tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	/* read-only data as they are defined as const, no lock needed */
+	tmp->core = drbg->core;
+	tmp->d_ops = drbg->d_ops;
+
+	ret = drbg_alloc_state(tmp);
+	if (ret)
+		goto err;
+
+	spin_lock_bh(&drbg->drbg_lock);
+	drbg_copy_drbg(drbg, tmp);
+	/* only make a link to the test buffer, as we only read that data */
+	tmp->test_data = drbg->test_data;
+	spin_unlock_bh(&drbg->drbg_lock);
+	*shadow = tmp;
+	return 0;
+
+err:
+	if (tmp)
+		kzfree(tmp);
+	return ret;
+}
+
+static void drbg_restore_shadow(struct drbg_state *drbg,
+				struct drbg_state **shadow)
+{
+	struct drbg_state *tmp = *shadow;
+
+	spin_lock_bh(&drbg->drbg_lock);
+	drbg_copy_drbg(tmp, drbg);
+	spin_unlock_bh(&drbg->drbg_lock);
+	drbg_dealloc_state(tmp);
+	kzfree(tmp);
+	*shadow = NULL;
+}
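+
+/*
+ * Illustrative use of the shadow pattern (this is exactly what
+ * drbg_generate() below does):
+ *
+ *	struct drbg_state *shadow = NULL;
+ *	ret = drbg_make_shadow(drbg, &shadow);
+ *	...operate on shadow without holding drbg->drbg_lock...
+ *	drbg_restore_shadow(drbg, &shadow);
+ */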
+
+/*************************************************************************
+ * DRBG interface functions
+ *************************************************************************/
+
+/*
+ * DRBG generate function as required by SP800-90A - this function
+ * generates random numbers
+ *
+ * @drbg DRBG state handle
+ * @buf Buffer where to store the random numbers -- the buffer must already
+ *      be pre-allocated by the caller
+ * @buflen Length of output buffer - this value defines the number of random
+ *	   bytes pulled from DRBG
+ * @addtl Additional input that is mixed into state, may be NULL -- note
+ *	  the entropy is pulled by the DRBG internally unconditionally
+ *	  as defined in SP800-90A. The additional input is mixed into
+ *	  the state in addition to the pulled entropy.
+ *
+ * return: generated number of bytes
+ */
+static int drbg_generate(struct drbg_state *drbg,
+			 unsigned char *buf, unsigned int buflen,
+			 struct drbg_string *addtl)
+{
+	int len = 0;
+	struct drbg_state *shadow = NULL;
+	LIST_HEAD(addtllist);
+	struct drbg_string timestamp;
+	union {
+		cycles_t cycles;
+		unsigned char char_cycles[sizeof(cycles_t)];
+	} now;
+
+	if (0 == buflen || !buf) {
+		pr_devel("DRBG: no output buffer provided\n");
+		return -EINVAL;
+	}
+	if (addtl && NULL == addtl->buf && 0 < addtl->len) {
+		pr_devel("DRBG: wrong format of additional information\n");
+		return -EINVAL;
+	}
+
+	len = drbg_make_shadow(drbg, &shadow);
+	if (len) {
+		pr_devel("DRBG: shadow copy cannot be generated\n");
+		return len;
+	}
+
+	/* 9.3.1 step 2 */
+	len = -EINVAL;
+	if (buflen > (drbg_max_request_bytes(shadow))) {
+		pr_devel("DRBG: requested random numbers too large %u\n",
+			 buflen);
+		goto err;
+	}
+
+	/* 9.3.1 step 3 is implicit with the chosen DRBG */
+
+	/* 9.3.1 step 4 */
+	if (addtl && addtl->len > (drbg_max_addtl(shadow))) {
+		pr_devel("DRBG: additional information string too long %zu\n",
+			 addtl->len);
+		goto err;
+	}
+	/* 9.3.1 step 5 is implicit with the chosen DRBG */
+
+	/*
+	 * 9.3.1 steps 6 and 9, supplemented by 9.3.2 step c, are implemented
+	 * here. The spec is a bit convoluted here; we make it simpler.
+	 */
+	if ((drbg_max_requests(shadow)) < shadow->reseed_ctr)
+		shadow->seeded = false;
+
+	/* allocate cipher handle */
+	if (shadow->d_ops->crypto_init) {
+		len = shadow->d_ops->crypto_init(shadow);
+		if (len)
+			goto err;
+	}
+
+	if (shadow->pr || !shadow->seeded) {
+		pr_devel("DRBG: reseeding before generation (prediction "
+			 "resistance: %s, state %s)\n",
+			 drbg->pr ? "true" : "false",
+			 drbg->seeded ? "seeded" : "unseeded");
+		/* 9.3.1 steps 7.1 through 7.3 */
+		len = drbg_seed(shadow, addtl, true);
+		if (len)
+			goto err;
+		/* 9.3.1 step 7.4 */
+		addtl = NULL;
+	}
+
+	/*
+	 * Mix the time stamp into the DRBG state if the DRBG is not in
+	 * test mode. If there are two callers invoking the DRBG at the same
+	 * time, i.e. before the first caller merges its shadow state back,
+	 * both callers would obtain the same random number stream without
+	 * changing the state here.
+	 */
+	if (!drbg->test_data) {
+		now.cycles = random_get_entropy();
+		drbg_string_fill(&timestamp, now.char_cycles, sizeof(cycles_t));
+		list_add_tail(&timestamp.list, &addtllist);
+	}
+	if (addtl && 0 < addtl->len)
+		list_add_tail(&addtl->list, &addtllist);
+	/* 9.3.1 step 8 and 10 */
+	len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist);
+
+	/* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
+	shadow->reseed_ctr++;
+	if (0 >= len)
+		goto err;
+
+	/*
+	 * Section 11.3.3 requires to re-perform self tests after some
+	 * generated random numbers. The chosen value after which self
+	 * test is performed is arbitrary, but it should be reasonable.
+	 * However, we do not perform these self tests for the following
+	 * reasons: it is mathematically impossible that the initial self tests
+	 * were successful and the following ones are not. If the initial tests
+	 * passed but the following ones did not, the kernel integrity would be
+	 * violated. In this case, the entire kernel operation is questionable
+	 * and it is unlikely that the integrity violation only affects the
+	 * correct operation of the DRBG.
+	 *
+	 * Albeit the following code is commented out, it is provided in
+	 * case somebody has a need to implement the test of 11.3.3.
+	 */
+#if 0
+	if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) {
+		int err = 0;
+		pr_devel("DRBG: start to perform self test\n");
+		if (drbg->core->flags & DRBG_HMAC)
+			err = alg_test("drbg_pr_hmac_sha256",
+				       "drbg_pr_hmac_sha256", 0, 0);
+		else if (drbg->core->flags & DRBG_CTR)
+			err = alg_test("drbg_pr_ctr_aes128",
+				       "drbg_pr_ctr_aes128", 0, 0);
+		else
+			err = alg_test("drbg_pr_sha256",
+				       "drbg_pr_sha256", 0, 0);
+		if (err) {
+			pr_err("DRBG: periodical self test failed\n");
+			/*
+			 * uninstantiate implies that from now on, only errors
+			 * are returned when reusing this DRBG cipher handle
+			 */
+			drbg_uninstantiate(drbg);
+			drbg_dealloc_state(shadow);
+			kzfree(shadow);
+			return 0;
+		} else {
+			pr_devel("DRBG: self test successful\n");
+		}
+	}
+#endif
+
+err:
+	if (shadow->d_ops->crypto_fini)
+		shadow->d_ops->crypto_fini(shadow);
+	drbg_restore_shadow(drbg, &shadow);
+	return len;
+}
+
+/*
+ * Wrapper around drbg_generate which can pull arbitrary long strings
+ * from the DRBG without hitting the maximum request limitation.
+ *
+ * Parameters: see drbg_generate
+ * Return codes: see drbg_generate -- if one drbg_generate request fails,
+ *		 the entire drbg_generate_long request fails
+ */
+static int drbg_generate_long(struct drbg_state *drbg,
+			      unsigned char *buf, unsigned int buflen,
+			      struct drbg_string *addtl)
+{
+	int len = 0;
+	unsigned int slice = 0;
+	do {
+		int tmplen = 0;
+		unsigned int chunk = 0;
+		slice = ((buflen - len) / drbg_max_request_bytes(drbg));
+		chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
+		tmplen = drbg_generate(drbg, buf + len, chunk, addtl);
+		if (0 >= tmplen)
+			return tmplen;
+		len += tmplen;
+	} while (slice > 0 && (len < buflen));
+	return len;
+}
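+
+/*
+ * Example (illustrative): assuming drbg_max_request_bytes() yields 65536,
+ * a request for 65636 bytes results in one full 65536 byte drbg_generate()
+ * call followed by one 100 byte call.
+ */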
+
+/*
+ * DRBG instantiation function as required by SP800-90A - this function
+ * sets up the DRBG handle, performs the initial seeding and all sanity
+ * checks required by SP800-90A
+ *
+ * @drbg memory of state
+ * @pers Personalization string that is mixed into state, may be NULL -- note
+ *	 the entropy is pulled by the DRBG internally unconditionally
+ *	 as defined in SP800-90A. The additional input is mixed into
+ *	 the state in addition to the pulled entropy.
+ * @coreref reference to core
+ * @pr prediction resistance enabled
+ *
+ * return
+ *	0 on success
+ *	error value otherwise
+ */
+static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
+			    int coreref, bool pr)
+{
+	int ret = -ENOMEM;
+
+	pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
+		 "%s\n", coreref, pr ? "enabled" : "disabled");
+	drbg->core = &drbg_cores[coreref];
+	drbg->pr = pr;
+	drbg->seeded = false;
+	switch (drbg->core->flags & DRBG_TYPE_MASK) {
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+	case DRBG_HMAC:
+		drbg->d_ops = &drbg_hmac_ops;
+		break;
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+	case DRBG_HASH:
+		drbg->d_ops = &drbg_hash_ops;
+		break;
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+	case DRBG_CTR:
+		drbg->d_ops = &drbg_ctr_ops;
+		break;
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/* 9.1 step 1 is implicit with the selected DRBG type */
+
+	/*
+	 * 9.1 step 2 is implicit as caller can select prediction resistance
+	 * and the flag is copied into drbg->flags --
+	 * all DRBG types support prediction resistance
+	 */
+
+	/* 9.1 step 4 is implicit in drbg_sec_strength */
+
+	ret = drbg_alloc_state(drbg);
+	if (ret)
+		return ret;
+
+	ret = -EFAULT;
+	if (drbg->d_ops->crypto_init && drbg->d_ops->crypto_init(drbg))
+		goto err;
+	ret = drbg_seed(drbg, pers, false);
+	if (drbg->d_ops->crypto_fini)
+		drbg->d_ops->crypto_fini(drbg);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	drbg_dealloc_state(drbg);
+	return ret;
+}
+
+/*
+ * DRBG uninstantiate function as required by SP800-90A - this function
+ * frees all buffers and the DRBG handle
+ *
+ * @drbg DRBG state handle
+ *
+ * return
+ *	0 on success
+ */
+static int drbg_uninstantiate(struct drbg_state *drbg)
+{
+	spin_lock_bh(&drbg->drbg_lock);
+	drbg_dealloc_state(drbg);
+	/* no scrubbing of test_data -- this shall survive an uninstantiate */
+	spin_unlock_bh(&drbg->drbg_lock);
+	return 0;
+}
+
+/*
+ * Helper function for setting the test data in the DRBG
+ *
+ * @drbg DRBG state handle
+ * @test_data test data to set
+ */
+static inline void drbg_set_testdata(struct drbg_state *drbg,
+				     struct drbg_test_data *test_data)
+{
+	if (!test_data || !test_data->testentropy)
+		return;
+	spin_lock_bh(&drbg->drbg_lock);
+	drbg->test_data = test_data;
+	spin_unlock_bh(&drbg->drbg_lock);
+}
+
+/***************************************************************
+ * Kernel crypto API cipher invocations requested by DRBG
+ ***************************************************************/
+
+#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+static int drbg_init_hash_kernel(struct drbg_state *drbg)
+{
+	struct sdesc *sdesc;
+	struct crypto_shash *tfm;
+
+	tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0);
+	if (IS_ERR(tfm)) {
+		pr_info("DRBG: could not allocate digest TFM handle\n");
+		return PTR_ERR(tfm);
+	}
+	BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm));
+	sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+			GFP_KERNEL);
+	if (!sdesc) {
+		crypto_free_shash(tfm);
+		return -ENOMEM;
+	}
+
+	sdesc->shash.tfm = tfm;
+	sdesc->shash.flags = 0;
+	drbg->priv_data = sdesc;
+	return 0;
+}
+
+static int drbg_fini_hash_kernel(struct drbg_state *drbg)
+{
+	struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+	if (sdesc) {
+		crypto_free_shash(sdesc->shash.tfm);
+		kzfree(sdesc);
+	}
+	drbg->priv_data = NULL;
+	return 0;
+}
+
+static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
+			   unsigned char *outval, const struct list_head *in)
+{
+	struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+	struct drbg_string *input = NULL;
+
+	if (key)
+		crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
+	crypto_shash_init(&sdesc->shash);
+	list_for_each_entry(input, in, list)
+		crypto_shash_update(&sdesc->shash, input->buf, input->len);
+	return crypto_shash_final(&sdesc->shash, outval);
+}
+#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+static int drbg_init_sym_kernel(struct drbg_state *drbg)
+{
+	int ret = 0;
+	struct crypto_blkcipher *tfm;
+
+	tfm = crypto_alloc_blkcipher(drbg->core->backend_cra_name, 0, 0);
+	if (IS_ERR(tfm)) {
+		pr_info("DRBG: could not allocate cipher TFM handle\n");
+		return PTR_ERR(tfm);
+	}
+	BUG_ON(drbg_blocklen(drbg) != crypto_blkcipher_blocksize(tfm));
+	drbg->priv_data = tfm;
+	return ret;
+}
+
+static int drbg_fini_sym_kernel(struct drbg_state *drbg)
+{
+	struct crypto_blkcipher *tfm =
+		(struct crypto_blkcipher *)drbg->priv_data;
+	if (tfm)
+		crypto_free_blkcipher(tfm);
+	drbg->priv_data = NULL;
+	return 0;
+}
+
+static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
+			  unsigned char *outval, const struct drbg_string *in)
+{
+	int ret = 0;
+	struct scatterlist sg_in, sg_out;
+	struct blkcipher_desc desc;
+	struct crypto_blkcipher *tfm =
+		(struct crypto_blkcipher *)drbg->priv_data;
+
+	desc.tfm = tfm;
+	desc.flags = 0;
+	crypto_blkcipher_setkey(tfm, key, (drbg_keylen(drbg)));
+	/* there is only one component in *in */
+	sg_init_one(&sg_in, in->buf, in->len);
+	sg_init_one(&sg_out, outval, drbg_blocklen(drbg));
+	ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, in->len);
+
+	return ret;
+}
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+
+/***************************************************************
+ * Kernel crypto API interface to register DRBG
+ ***************************************************************/
+
+/*
+ * Look up the DRBG core by the given kernel crypto API cra_driver_name.
+ * The code uses the drbg_cores definition to do this.
+ *
+ * @cra_driver_name kernel crypto API cra_driver_name
+ * @coreref reference to an integer which is filled with the index of
+ *  the applicable core
+ * @pr reference for setting prediction resistance
+ */
+static inline void drbg_convert_tfm_core(const char *cra_driver_name,
+					 int *coreref, bool *pr)
+{
+	int i = 0;
+	size_t start = 0;
+	int len = 0;
+
+	*pr = true;
+	/* disassemble the names */
+	if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) {
+		start = 10;
+		*pr = false;
+	} else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) {
+		start = 8;
+	} else {
+		return;
+	}
+
+	/* remove the first part */
+	len = strlen(cra_driver_name) - start;
+	for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) {
+		if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name,
+			    len)) {
+			*coreref = i;
+			return;
+		}
+	}
+}
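+
+/*
+ * Example (illustrative): the cra_driver_name "drbg_nopr_hmac_sha256"
+ * yields *pr = false and *coreref set to the index of the hmac_sha256
+ * entry in drbg_cores[].
+ */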
+
+static int drbg_kcapi_init(struct crypto_tfm *tfm)
+{
+	struct drbg_state *drbg = crypto_tfm_ctx(tfm);
+	bool pr = false;
+	int coreref = 0;
+
+	drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr);
+	/*
+	 * when a personalization string is needed, the caller must call reset
+	 * and provide the personalization string as seed information
+	 */
+	return drbg_instantiate(drbg, NULL, coreref, pr);
+}
+
+static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
+{
+	drbg_uninstantiate(crypto_tfm_ctx(tfm));
+}
+
+/*
+ * Generate random numbers invoked by the kernel crypto API:
+ * The API of the kernel crypto API is extended as follows:
+ *
+ * If dlen is larger than zero, rdata is interpreted as the output buffer
+ * where random data is to be stored.
+ *
+ * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen
+ * which holds the additional information string that is used for the
+ * DRBG generation process. The output buffer that is to be used to store
+ * data is also pointed to by struct drbg_gen.
+ */
+static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata,
+			     unsigned int dlen)
+{
+	struct drbg_state *drbg = crypto_rng_ctx(tfm);
+	if (0 < dlen) {
+		return drbg_generate_long(drbg, rdata, dlen, NULL);
+	} else {
+		struct drbg_gen *data = (struct drbg_gen *)rdata;
+		struct drbg_string addtl;
+		/* catch NULL pointer */
+		if (!data)
+			return 0;
+		drbg_set_testdata(drbg, data->test_data);
+		/* linked list variable is now local to allow modification */
+		drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len);
+		return drbg_generate_long(drbg, data->outbuf, data->outlen,
+					  &addtl);
+	}
+}
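+
+/*
+ * Illustrative caller sketch for the dlen == 0 path (variable names are
+ * assumptions; addtl must be non-NULL as it is dereferenced above):
+ *
+ *	struct drbg_gen gen;
+ *
+ *	gen.outbuf = outbuf;
+ *	gen.outlen = sizeof(outbuf);
+ *	gen.addtl = &addtl_string;
+ *	gen.test_data = NULL;
+ *	crypto_rng_get_bytes(drng, (u8 *)&gen, 0);
+ */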
+
+/*
+ * Reset the DRBG invoked by the kernel crypto API
+ * The reset implies a full re-initialization of the DRBG. Similar to the
+ * generate function of drbg_kcapi_random, this function extends the
+ * kernel crypto API interface with struct drbg_gen
+ */
+static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+	struct drbg_state *drbg = crypto_rng_ctx(tfm);
+	struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm);
+	bool pr = false;
+	struct drbg_string seed_string;
+	int coreref = 0;
+
+	drbg_uninstantiate(drbg);
+	drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref,
+			      &pr);
+	if (0 < slen) {
+		drbg_string_fill(&seed_string, seed, slen);
+		return drbg_instantiate(drbg, &seed_string, coreref, pr);
+	} else {
+		struct drbg_gen *data = (struct drbg_gen *)seed;
+		/* allow invocation of API call with NULL, 0 */
+		if (!data)
+			return drbg_instantiate(drbg, NULL, coreref, pr);
+		drbg_set_testdata(drbg, data->test_data);
+		/* linked list variable is now local to allow modification */
+		drbg_string_fill(&seed_string, data->addtl->buf,
+				 data->addtl->len);
+		return drbg_instantiate(drbg, &seed_string, coreref, pr);
+	}
+}
+
+/***************************************************************
+ * Kernel module: code to load the module
+ ***************************************************************/
+
+/*
+ * Tests as defined in 11.3.2 in addition to the cipher tests: testing
+ * of the error handling.
+ *
+ * Note: testing of a failing seed source as defined in 11.3.2 is not
+ * applicable as the seed source of get_random_bytes does not fail.
+ *
+ * Note 2: There is no sensible way of testing the reseed counter
+ * enforcement, so skip it.
+ */
+static inline int __init drbg_healthcheck_sanity(void)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+	int len = 0;
+#define OUTBUFLEN 16
+	unsigned char buf[OUTBUFLEN];
+	struct drbg_state *drbg = NULL;
+	int ret = -EFAULT;
+	int rc = -EFAULT;
+	bool pr = false;
+	int coreref = 0;
+	struct drbg_string addtl;
+	size_t max_addtllen, max_request_bytes;
+
+	/* only perform test in FIPS mode */
+	if (!fips_enabled)
+		return 0;
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+	drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
+#elif defined CONFIG_CRYPTO_DRBG_HASH
+	drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
+#else
+	drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
+#endif
+
+	drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+	if (!drbg)
+		return -ENOMEM;
+
+	/*
+	 * if the following tests fail, it is likely that there is a buffer
+	 * overflow as buf is much smaller than the requested or provided
+	 * string lengths -- in case the error handling does not succeed
+	 * we may get an OOPS. And we want to get an OOPS as this is a
+	 * grave bug.
+	 */
+
+	/* get a valid instance of DRBG for following tests */
+	ret = drbg_instantiate(drbg, NULL, coreref, pr);
+	if (ret) {
+		rc = ret;
+		goto outbuf;
+	}
+	max_addtllen = drbg_max_addtl(drbg);
+	max_request_bytes = drbg_max_request_bytes(drbg);
+	drbg_string_fill(&addtl, buf, max_addtllen + 1);
+	/* overflow addtllen with additional info string */
+	len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
+	BUG_ON(len > 0);
+	/* overflow max_bits */
+	len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
+	BUG_ON(len > 0);
+	drbg_uninstantiate(drbg);
+
+	/* overflow max addtllen with personalization string */
+	ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+	BUG_ON(!ret);
+	/* test uninstantiated DRBG */
+	len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
+	BUG_ON(len > 0);
+	/* all tests passed */
+	rc = 0;
+
+	pr_devel("DRBG: Sanity tests for failure code paths successfully "
+		 "completed\n");
+
+	drbg_uninstantiate(drbg);
+outbuf:
+	kzfree(drbg);
+	return rc;
+#else /* CONFIG_CRYPTO_FIPS */
+	return 0;
+#endif /* CONFIG_CRYPTO_FIPS */
+}
+
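+/*
+ * 22 slots: with all three DRBG flavours enabled, drbg_cores[] holds 11
+ * definitions (3 CTR, 4 Hash, 4 HMAC), each of which drbg_init() below
+ * registers twice -- once with and once without prediction resistance.
+ */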
+static struct crypto_alg drbg_algs[22];
+
+/*
+ * Fill the array drbg_algs used to register the different DRBGs
+ * with the kernel crypto API. To fill the array, the information
+ * from drbg_cores[] is used.
+ */
+static inline void __init drbg_fill_array(struct crypto_alg *alg,
+					  const struct drbg_core *core, int pr)
+{
+	int pos = 0;
+	static int priority = 100;
+
+	memset(alg, 0, sizeof(struct crypto_alg));
+	memcpy(alg->cra_name, "stdrng", 6);
+	if (pr) {
+		memcpy(alg->cra_driver_name, "drbg_pr_", 8);
+		pos = 8;
+	} else {
+		memcpy(alg->cra_driver_name, "drbg_nopr_", 10);
+		pos = 10;
+	}
+	memcpy(alg->cra_driver_name + pos, core->cra_name,
+	       strlen(core->cra_name));
+
+	alg->cra_priority = priority;
+	priority++;
+	/*
+	 * If FIPS mode is enabled, the selected DRBG shall have the
+	 * highest cra_priority of all stdrng instances to ensure
+	 * it is selected.
+	 */
+	if (fips_enabled)
+		alg->cra_priority += 200;
+
+	alg->cra_flags		= CRYPTO_ALG_TYPE_RNG;
+	alg->cra_ctxsize	= sizeof(struct drbg_state);
+	alg->cra_type		= &crypto_rng_type;
+	alg->cra_module		= THIS_MODULE;
+	alg->cra_init		= drbg_kcapi_init;
+	alg->cra_exit		= drbg_kcapi_cleanup;
+	alg->cra_u.rng.rng_make_random	= drbg_kcapi_random;
+	alg->cra_u.rng.rng_reset	= drbg_kcapi_reset;
+	alg->cra_u.rng.seedsize	= 0;
+}
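+
+/*
+ * For example, the drbg_core named "ctr_aes128" yields the driver names
+ * "drbg_pr_ctr_aes128" (with prediction resistance) and
+ * "drbg_nopr_ctr_aes128" (without), both registered under the generic
+ * cra_name "stdrng".
+ */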
+
+static int __init drbg_init(void)
+{
+	unsigned int i = 0; /* index into drbg_algs */
+	unsigned int j = 0; /* index into drbg_cores */
+	int ret;
+
+	ret = drbg_healthcheck_sanity();
+	if (ret)
+		return ret;
+
+	if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) {
+		pr_info("DRBG: Cannot register all DRBG types "
+			"(slots needed: %zu, slots available: %zu)\n",
+			ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
+		return -EFAULT;
+	}
+
+	/*
+	 * Each DRBG definition can be used with PR and without PR, thus
+	 * we instantiate each DRBG in drbg_cores[] twice.
+	 *
+	 * As the order of placing them into the drbg_algs array matters
+	 * (the later DRBGs receive a higher cra_priority), we register the
+	 * prediction resistance DRBGs first, so that the more commonly
+	 * used non-PR variants end up with the higher priority.
+	 */
+	for (j = 0; j < ARRAY_SIZE(drbg_cores); j++, i++)
+		drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
+	for (j = 0; j < ARRAY_SIZE(drbg_cores); j++, i++)
+		drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
+	return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+}
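+
+/*
+ * Priority sketch, assuming all three DRBG flavours are enabled: the
+ * static priority counter in drbg_fill_array() starts at 100, so the 11
+ * PR instances get cra_priority 100..110 and the 11 non-PR instances get
+ * 111..121 (each raised by 200 in FIPS mode). crypto_alloc_rng("stdrng",
+ * 0, 0) therefore resolves to a non-PR DRBG.
+ */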
+
+static void __exit drbg_exit(void)
+{
+	crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+}
+
+module_init(drbg_init);
+module_exit(drbg_exit);
+#ifndef CRYPTO_DRBG_HASH_STRING
+#define CRYPTO_DRBG_HASH_STRING ""
+#endif
+#ifndef CRYPTO_DRBG_HMAC_STRING
+#define CRYPTO_DRBG_HMAC_STRING ""
+#endif
+#ifndef CRYPTO_DRBG_CTR_STRING
+#define CRYPTO_DRBG_CTR_STRING ""
+#endif
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
+		   "using following cores: "
+		   CRYPTO_DRBG_HASH_STRING
+		   CRYPTO_DRBG_HMAC_STRING
+		   CRYPTO_DRBG_CTR_STRING);
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 42ce9f5..bf7ab4a 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -68,7 +68,7 @@
 	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
 	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
 	struct ablkcipher_request *subreq;
-	crypto_completion_t complete;
+	crypto_completion_t compl;
 	void *data;
 	struct scatterlist *osrc, *odst;
 	struct scatterlist *dst;
@@ -86,7 +86,7 @@
 	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 
 	giv = req->giv;
-	complete = req->creq.base.complete;
+	compl = req->creq.base.complete;
 	data = req->creq.base.data;
 
 	osrc = req->creq.src;
@@ -101,11 +101,11 @@
 	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
 		giv = PTR_ALIGN((u8 *)reqctx->tail,
 				crypto_ablkcipher_alignmask(geniv) + 1);
-		complete = eseqiv_complete;
+		compl = eseqiv_complete;
 		data = req;
 	}
 
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
+	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
 					data);
 
 	sg_init_table(reqctx->src, 2);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b4f0179..276cdac 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -228,14 +228,14 @@
 
 static int gcm_hash_update(struct aead_request *req,
 			   struct crypto_gcm_req_priv_ctx *pctx,
-			   crypto_completion_t complete,
+			   crypto_completion_t compl,
 			   struct scatterlist *src,
 			   unsigned int len)
 {
 	struct ahash_request *ahreq = &pctx->u.ahreq;
 
 	ahash_request_set_callback(ahreq, aead_request_flags(req),
-				   complete, req);
+				   compl, req);
 	ahash_request_set_crypt(ahreq, src, NULL, len);
 
 	return crypto_ahash_update(ahreq);
@@ -244,12 +244,12 @@
 static int gcm_hash_remain(struct aead_request *req,
 			   struct crypto_gcm_req_priv_ctx *pctx,
 			   unsigned int remain,
-			   crypto_completion_t complete)
+			   crypto_completion_t compl)
 {
 	struct ahash_request *ahreq = &pctx->u.ahreq;
 
 	ahash_request_set_callback(ahreq, aead_request_flags(req),
-				   complete, req);
+				   compl, req);
 	sg_init_one(pctx->src, gcm_zeroes, remain);
 	ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
 
@@ -375,14 +375,14 @@
 {
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
-	crypto_completion_t complete;
+	crypto_completion_t compl;
 	unsigned int remain = 0;
 
 	if (!err && gctx->cryptlen) {
 		remain = gcm_remain(gctx->cryptlen);
-		complete = remain ? gcm_hash_crypt_done :
+		compl = remain ? gcm_hash_crypt_done :
 			gcm_hash_crypt_remain_done;
-		err = gcm_hash_update(req, pctx, complete,
+		err = gcm_hash_update(req, pctx, compl,
 				      gctx->src, gctx->cryptlen);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
@@ -429,14 +429,14 @@
 static void __gcm_hash_init_done(struct aead_request *req, int err)
 {
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-	crypto_completion_t complete;
+	crypto_completion_t compl;
 	unsigned int remain = 0;
 
 	if (!err && req->assoclen) {
 		remain = gcm_remain(req->assoclen);
-		complete = remain ? gcm_hash_assoc_done :
+		compl = remain ? gcm_hash_assoc_done :
 			gcm_hash_assoc_remain_done;
-		err = gcm_hash_update(req, pctx, complete,
+		err = gcm_hash_update(req, pctx, compl,
 				      req->assoc, req->assoclen);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
@@ -462,7 +462,7 @@
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	unsigned int remain;
-	crypto_completion_t complete;
+	crypto_completion_t compl;
 	int err;
 
 	ahash_request_set_tfm(ahreq, ctx->ghash);
@@ -473,8 +473,8 @@
 	if (err)
 		return err;
 	remain = gcm_remain(req->assoclen);
-	complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
-	err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
+	compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
+	err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen);
 	if (err)
 		return err;
 	if (remain) {
@@ -484,8 +484,8 @@
 			return err;
 	}
 	remain = gcm_remain(gctx->cryptlen);
-	complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
-	err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
+	compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
+	err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen);
 	if (err)
 		return err;
 	if (remain) {
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 1c2aa69..a8ff2f7 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/lzo.h>
 
 struct lzo_ctx {
@@ -30,7 +31,10 @@
 {
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
+	ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
+				    GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!ctx->lzo_comp_mem)
+		ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
 	if (!ctx->lzo_comp_mem)
 		return -ENOMEM;
 
@@ -41,7 +45,7 @@
 {
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	vfree(ctx->lzo_comp_mem);
+	kvfree(ctx->lzo_comp_mem);
 }
 
 static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index f2cba4ed..ee190fc 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -100,7 +100,7 @@
 	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
 	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
 	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	crypto_completion_t complete;
+	crypto_completion_t compl;
 	void *data;
 	u8 *info;
 	unsigned int ivsize;
@@ -108,7 +108,7 @@
 
 	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 
-	complete = req->creq.base.complete;
+	compl = req->creq.base.complete;
 	data = req->creq.base.data;
 	info = req->creq.info;
 
@@ -122,11 +122,11 @@
 		if (!info)
 			return -ENOMEM;
 
-		complete = seqiv_complete;
+		compl = seqiv_complete;
 		data = req;
 	}
 
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
+	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
 					data);
 	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
 				     req->creq.nbytes, info);
@@ -146,7 +146,7 @@
 	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *areq = &req->areq;
 	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-	crypto_completion_t complete;
+	crypto_completion_t compl;
 	void *data;
 	u8 *info;
 	unsigned int ivsize;
@@ -154,7 +154,7 @@
 
 	aead_request_set_tfm(subreq, aead_geniv_base(geniv));
 
-	complete = areq->base.complete;
+	compl = areq->base.complete;
 	data = areq->base.data;
 	info = areq->iv;
 
@@ -168,11 +168,11 @@
 		if (!info)
 			return -ENOMEM;
 
-		complete = seqiv_aead_complete;
+		compl = seqiv_aead_complete;
 		data = req;
 	}
 
-	aead_request_set_callback(subreq, areq->base.flags, complete, data);
+	aead_request_set_callback(subreq, areq->base.flags, compl, data);
 	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
 			       info);
 	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index ba247cf..890449e 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -48,6 +48,13 @@
 #define DECRYPT 0
 
 /*
+ * return a string with the driver name
+ */
+#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
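+/* e.g. get_driver_name(crypto_aead, tfm) expands to
+ * crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)) */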
+
+/*
  * Used by test_cipher_speed()
  */
 static unsigned int sec;
@@ -68,13 +73,13 @@
 };
 
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
-			       struct scatterlist *sg, int blen, int sec)
+			       struct scatterlist *sg, int blen, int secs)
 {
 	unsigned long start, end;
 	int bcount;
 	int ret;
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
 			ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
@@ -86,7 +91,7 @@
 	}
 
 	printk("%d operations in %d seconds (%ld bytes)\n",
-	       bcount, sec, (long)bcount * blen);
+	       bcount, secs, (long)bcount * blen);
 	return 0;
 }
 
@@ -138,13 +143,13 @@
 }
 
 static int test_aead_jiffies(struct aead_request *req, int enc,
-				int blen, int sec)
+				int blen, int secs)
 {
 	unsigned long start, end;
 	int bcount;
 	int ret;
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
 			ret = crypto_aead_encrypt(req);
@@ -156,7 +161,7 @@
 	}
 
 	printk("%d operations in %d seconds (%ld bytes)\n",
-	       bcount, sec, (long)bcount * blen);
+	       bcount, secs, (long)bcount * blen);
 	return 0;
 }
 
@@ -260,7 +265,7 @@
 	}
 }
 
-static void test_aead_speed(const char *algo, int enc, unsigned int sec,
+static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			    struct aead_speed_template *template,
 			    unsigned int tcount, u8 authsize,
 			    unsigned int aad_size, u8 *keysize)
@@ -305,9 +310,6 @@
 	asg = &sg[8];
 	sgout = &asg[8];
 
-
-	printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e);
-
 	tfm = crypto_alloc_aead(algo, 0, 0);
 
 	if (IS_ERR(tfm)) {
@@ -316,6 +318,9 @@
 		goto out_notfm;
 	}
 
+	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
+			get_driver_name(crypto_aead, tfm), e);
+
 	req = aead_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("alg: aead: Failed to allocate request for %s\n",
@@ -374,8 +379,9 @@
 			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
 			aead_request_set_assoc(req, asg, aad_size);
 
-			if (sec)
-				ret = test_aead_jiffies(req, enc, *b_size, sec);
+			if (secs)
+				ret = test_aead_jiffies(req, enc, *b_size,
+							secs);
 			else
 				ret = test_aead_cycles(req, enc, *b_size);
 
@@ -405,7 +411,7 @@
 	return;
 }
 
-static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
+static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
 			      struct cipher_speed_template *template,
 			      unsigned int tcount, u8 *keysize)
 {
@@ -422,8 +428,6 @@
 	else
 		e = "decryption";
 
-	printk("\ntesting speed of %s %s\n", algo, e);
-
 	tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
 
 	if (IS_ERR(tfm)) {
@@ -434,6 +438,9 @@
 	desc.tfm = tfm;
 	desc.flags = 0;
 
+	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
+			get_driver_name(crypto_blkcipher, tfm), e);
+
 	i = 0;
 	do {
 
@@ -483,9 +490,9 @@
 				crypto_blkcipher_set_iv(tfm, iv, iv_len);
 			}
 
-			if (sec)
+			if (secs)
 				ret = test_cipher_jiffies(&desc, enc, sg,
-							  *b_size, sec);
+							  *b_size, secs);
 			else
 				ret = test_cipher_cycles(&desc, enc, sg,
 							 *b_size);
@@ -506,13 +513,13 @@
 
 static int test_hash_jiffies_digest(struct hash_desc *desc,
 				    struct scatterlist *sg, int blen,
-				    char *out, int sec)
+				    char *out, int secs)
 {
 	unsigned long start, end;
 	int bcount;
 	int ret;
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		ret = crypto_hash_digest(desc, sg, blen, out);
 		if (ret)
@@ -520,22 +527,22 @@
 	}
 
 	printk("%6u opers/sec, %9lu bytes/sec\n",
-	       bcount / sec, ((long)bcount * blen) / sec);
+	       bcount / secs, ((long)bcount * blen) / secs);
 
 	return 0;
 }
 
 static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
-			     int blen, int plen, char *out, int sec)
+			     int blen, int plen, char *out, int secs)
 {
 	unsigned long start, end;
 	int bcount, pcount;
 	int ret;
 
 	if (plen == blen)
-		return test_hash_jiffies_digest(desc, sg, blen, out, sec);
+		return test_hash_jiffies_digest(desc, sg, blen, out, secs);
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		ret = crypto_hash_init(desc);
 		if (ret)
@@ -552,7 +559,7 @@
 	}
 
 	printk("%6u opers/sec, %9lu bytes/sec\n",
-	       bcount / sec, ((long)bcount * blen) / sec);
+	       bcount / secs, ((long)bcount * blen) / secs);
 
 	return 0;
 }
@@ -673,7 +680,7 @@
 	}
 }
 
-static void test_hash_speed(const char *algo, unsigned int sec,
+static void test_hash_speed(const char *algo, unsigned int secs,
 			    struct hash_speed *speed)
 {
 	struct scatterlist sg[TVMEMSIZE];
@@ -683,8 +690,6 @@
 	int i;
 	int ret;
 
-	printk(KERN_INFO "\ntesting speed of %s\n", algo);
-
 	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
 
 	if (IS_ERR(tfm)) {
@@ -693,6 +698,9 @@
 		return;
 	}
 
+	printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
+			get_driver_name(crypto_hash, tfm));
+
 	desc.tfm = tfm;
 	desc.flags = 0;
 
@@ -718,9 +726,9 @@
 		       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
 		       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
 
-		if (sec)
+		if (secs)
 			ret = test_hash_jiffies(&desc, sg, speed[i].blen,
-						speed[i].plen, output, sec);
+						speed[i].plen, output, secs);
 		else
 			ret = test_hash_cycles(&desc, sg, speed[i].blen,
 					       speed[i].plen, output);
@@ -765,13 +773,13 @@
 }
 
 static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
-				     char *out, int sec)
+				     char *out, int secs)
 {
 	unsigned long start, end;
 	int bcount;
 	int ret;
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
 		if (ret)
@@ -779,22 +787,22 @@
 	}
 
 	printk("%6u opers/sec, %9lu bytes/sec\n",
-	       bcount / sec, ((long)bcount * blen) / sec);
+	       bcount / secs, ((long)bcount * blen) / secs);
 
 	return 0;
 }
 
 static int test_ahash_jiffies(struct ahash_request *req, int blen,
-			      int plen, char *out, int sec)
+			      int plen, char *out, int secs)
 {
 	unsigned long start, end;
 	int bcount, pcount;
 	int ret;
 
 	if (plen == blen)
-		return test_ahash_jiffies_digest(req, blen, out, sec);
+		return test_ahash_jiffies_digest(req, blen, out, secs);
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		ret = crypto_ahash_init(req);
 		if (ret)
@@ -811,7 +819,7 @@
 	}
 
 	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
-		bcount / sec, ((long)bcount * blen) / sec);
+		bcount / secs, ((long)bcount * blen) / secs);
 
 	return 0;
 }
@@ -911,7 +919,7 @@
 	return 0;
 }
 
-static void test_ahash_speed(const char *algo, unsigned int sec,
+static void test_ahash_speed(const char *algo, unsigned int secs,
 			     struct hash_speed *speed)
 {
 	struct scatterlist sg[TVMEMSIZE];
@@ -921,8 +929,6 @@
 	static char output[1024];
 	int i, ret;
 
-	printk(KERN_INFO "\ntesting speed of async %s\n", algo);
-
 	tfm = crypto_alloc_ahash(algo, 0, 0);
 	if (IS_ERR(tfm)) {
 		pr_err("failed to load transform for %s: %ld\n",
@@ -930,6 +936,9 @@
 		return;
 	}
 
+	printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
+			get_driver_name(crypto_ahash, tfm));
+
 	if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
 		pr_err("digestsize(%u) > outputbuffer(%zu)\n",
 		       crypto_ahash_digestsize(tfm), sizeof(output));
@@ -960,9 +969,9 @@
 
 		ahash_request_set_crypt(req, sg, output, speed[i].plen);
 
-		if (sec)
+		if (secs)
 			ret = test_ahash_jiffies(req, speed[i].blen,
-						 speed[i].plen, output, sec);
+						 speed[i].plen, output, secs);
 		else
 			ret = test_ahash_cycles(req, speed[i].blen,
 						speed[i].plen, output);
@@ -994,13 +1003,13 @@
 }
 
 static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
-				int blen, int sec)
+				int blen, int secs)
 {
 	unsigned long start, end;
 	int bcount;
 	int ret;
 
-	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
 			ret = do_one_acipher_op(req,
@@ -1014,7 +1023,7 @@
 	}
 
 	pr_cont("%d operations in %d seconds (%ld bytes)\n",
-		bcount, sec, (long)bcount * blen);
+		bcount, secs, (long)bcount * blen);
 	return 0;
 }
 
@@ -1065,7 +1074,7 @@
 	return ret;
 }
 
-static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
+static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 			       struct cipher_speed_template *template,
 			       unsigned int tcount, u8 *keysize)
 {
@@ -1083,8 +1092,6 @@
 	else
 		e = "decryption";
 
-	pr_info("\ntesting speed of async %s %s\n", algo, e);
-
 	init_completion(&tresult.completion);
 
 	tfm = crypto_alloc_ablkcipher(algo, 0, 0);
@@ -1095,6 +1102,9 @@
 		return;
 	}
 
+	pr_info("\ntesting speed of async %s (%s) %s\n", algo,
+			get_driver_name(crypto_ablkcipher, tfm), e);
+
 	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
@@ -1168,9 +1178,9 @@
 
 			ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
-			if (sec)
+			if (secs)
 				ret = test_acipher_jiffies(req, enc,
-							   *b_size, sec);
+							   *b_size, secs);
 			else
 				ret = test_acipher_cycles(req, enc,
 							  *b_size);
@@ -1585,6 +1595,12 @@
 		test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
 				des3_speed_template, DES3_SPEED_VECTORS,
 				speed_template_24);
+		test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
+				des3_speed_template, DES3_SPEED_VECTORS,
+				speed_template_24);
+		test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
+				des3_speed_template, DES3_SPEED_VECTORS,
+				speed_template_24);
 		break;
 
 	case 202:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 498649a..ac2b631 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <crypto/rng.h>
+#include <crypto/drbg.h>
 
 #include "internal.h"
 
@@ -108,6 +109,11 @@
 	unsigned int count;
 };
 
+struct drbg_test_suite {
+	struct drbg_testvec *vecs;
+	unsigned int count;
+};
+
 struct alg_test_desc {
 	const char *alg;
 	int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -121,6 +127,7 @@
 		struct pcomp_test_suite pcomp;
 		struct hash_test_suite hash;
 		struct cprng_test_suite cprng;
+		struct drbg_test_suite drbg;
 	} suite;
 };
 
@@ -191,13 +198,20 @@
 	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
 	unsigned int i, j, k, temp;
 	struct scatterlist sg[8];
-	char result[64];
+	char *result;
+	char *key;
 	struct ahash_request *req;
 	struct tcrypt_result tresult;
 	void *hash_buff;
 	char *xbuf[XBUFSIZE];
 	int ret = -ENOMEM;
 
+	result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
+	if (!result)
+		return ret;
+	key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+	if (!key)
+		goto out_nobuf;
 	if (testmgr_alloc_buf(xbuf))
 		goto out_nobuf;
 
@@ -222,7 +236,7 @@
 			goto out;
 
 		j++;
-		memset(result, 0, 64);
+		memset(result, 0, MAX_DIGEST_SIZE);
 
 		hash_buff = xbuf[0];
 		hash_buff += align_offset;
@@ -232,8 +246,14 @@
 
 		if (template[i].ksize) {
 			crypto_ahash_clear_flags(tfm, ~0);
-			ret = crypto_ahash_setkey(tfm, template[i].key,
-						  template[i].ksize);
+			if (template[i].ksize > MAX_KEYLEN) {
+				pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+				       j, algo, template[i].ksize, MAX_KEYLEN);
+				ret = -EINVAL;
+				goto out;
+			}
+			memcpy(key, template[i].key, template[i].ksize);
+			ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
 			if (ret) {
 				printk(KERN_ERR "alg: hash: setkey failed on "
 				       "test %d for %s: ret=%d\n", j, algo,
@@ -293,7 +313,7 @@
 
 		if (template[i].np) {
 			j++;
-			memset(result, 0, 64);
+			memset(result, 0, MAX_DIGEST_SIZE);
 
 			temp = 0;
 			sg_init_table(sg, template[i].np);
@@ -312,8 +332,16 @@
 			}
 
 			if (template[i].ksize) {
+				if (template[i].ksize > MAX_KEYLEN) {
+					pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+					       j, algo, template[i].ksize,
+					       MAX_KEYLEN);
+					ret = -EINVAL;
+					goto out;
+				}
 				crypto_ahash_clear_flags(tfm, ~0);
-				ret = crypto_ahash_setkey(tfm, template[i].key,
+				memcpy(key, template[i].key, template[i].ksize);
+				ret = crypto_ahash_setkey(tfm, key,
 							  template[i].ksize);
 
 				if (ret) {
@@ -365,6 +393,8 @@
 out_noreq:
 	testmgr_free_buf(xbuf);
 out_nobuf:
+	kfree(key);
+	kfree(result);
 	return ret;
 }
 
@@ -422,6 +452,9 @@
 	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
 	if (!iv)
 		return ret;
+	key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+	if (!key)
+		goto out_noxbuf;
 	if (testmgr_alloc_buf(xbuf))
 		goto out_noxbuf;
 	if (testmgr_alloc_buf(axbuf))
@@ -486,7 +519,14 @@
 				crypto_aead_set_flags(
 					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
 
-			key = template[i].key;
+			if (template[i].klen > MAX_KEYLEN) {
+				pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+				       d, j, algo, template[i].klen,
+				       MAX_KEYLEN);
+				ret = -EINVAL;
+				goto out;
+			}
+			memcpy(key, template[i].key, template[i].klen);
 
 			ret = crypto_aead_setkey(tfm, key,
 						 template[i].klen);
@@ -587,7 +627,14 @@
 			if (template[i].wk)
 				crypto_aead_set_flags(
 					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-			key = template[i].key;
+			if (template[i].klen > MAX_KEYLEN) {
+				pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+				       d, j, algo, template[i].klen,
+				       MAX_KEYLEN);
+				ret = -EINVAL;
+				goto out;
+			}
+			memcpy(key, template[i].key, template[i].klen);
 
 			ret = crypto_aead_setkey(tfm, key, template[i].klen);
 			if (!ret == template[i].fail) {
@@ -769,6 +816,7 @@
 out_noaxbuf:
 	testmgr_free_buf(xbuf);
 out_noxbuf:
+	kfree(key);
 	kfree(iv);
 	return ret;
 }
@@ -1715,6 +1763,97 @@
 	return err;
 }
 
+static int drbg_cavs_test(struct drbg_testvec *test, int pr,
+			  const char *driver, u32 type, u32 mask)
+{
+	int ret = -EAGAIN;
+	struct crypto_rng *drng;
+	struct drbg_test_data test_data;
+	struct drbg_string addtl, pers, testentropy;
+	unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	drng = crypto_alloc_rng(driver, type, mask);
+	if (IS_ERR(drng)) {
+		printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
+		       "%s\n", driver);
+		kzfree(buf);
+		return -ENOMEM;
+	}
+
+	test_data.testentropy = &testentropy;
+	drbg_string_fill(&testentropy, test->entropy, test->entropylen);
+	drbg_string_fill(&pers, test->pers, test->perslen);
+	ret = crypto_drbg_reset_test(drng, &pers, &test_data);
+	if (ret) {
+		printk(KERN_ERR "alg: drbg: Failed to reset rng\n");
+		goto outbuf;
+	}
+
+	drbg_string_fill(&addtl, test->addtla, test->addtllen);
+	if (pr) {
+		drbg_string_fill(&testentropy, test->entpra, test->entprlen);
+		ret = crypto_drbg_get_bytes_addtl_test(drng,
+			buf, test->expectedlen, &addtl,	&test_data);
+	} else {
+		ret = crypto_drbg_get_bytes_addtl(drng,
+			buf, test->expectedlen, &addtl);
+	}
+	if (ret <= 0) {
+		printk(KERN_ERR "alg: drbg: could not obtain random data for "
+		       "driver %s\n", driver);
+		goto outbuf;
+	}
+
+	drbg_string_fill(&addtl, test->addtlb, test->addtllen);
+	if (pr) {
+		drbg_string_fill(&testentropy, test->entprb, test->entprlen);
+		ret = crypto_drbg_get_bytes_addtl_test(drng,
+			buf, test->expectedlen, &addtl, &test_data);
+	} else {
+		ret = crypto_drbg_get_bytes_addtl(drng,
+			buf, test->expectedlen, &addtl);
+	}
+	if (ret <= 0) {
+		printk(KERN_ERR "alg: drbg: could not obtain random data for "
+		       "driver %s\n", driver);
+		goto outbuf;
+	}
+
+	ret = memcmp(test->expected, buf, test->expectedlen);
+
+outbuf:
+	crypto_free_rng(drng);
+	kzfree(buf);
+	return ret;
+}
+
+static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+			 u32 type, u32 mask)
+{
+	int err = 0;
+	int pr = 0;
+	int i = 0;
+	struct drbg_testvec *template = desc->suite.drbg.vecs;
+	unsigned int tcount = desc->suite.drbg.count;
+
+	if (memcmp(driver, "drbg_pr_", 8) == 0)
+		pr = 1;
+
+	for (i = 0; i < tcount; i++) {
+		err = drbg_cavs_test(&template[i], pr, driver, type, mask);
+		if (err) {
+			printk(KERN_ERR "alg: drbg: Test %d failed for %s\n",
+			       i, driver);
+			err = -EINVAL;
+			break;
+		}
+	}
+	return err;
+}
+
 static int alg_test_null(const struct alg_test_desc *desc,
 			     const char *driver, u32 type, u32 mask)
 {
@@ -2458,6 +2600,152 @@
 		.alg = "digest_null",
 		.test = alg_test_null,
 	}, {
+		.alg = "drbg_nopr_ctr_aes128",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_nopr_ctr_aes128_tv_template,
+				.count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
+			}
+		}
+	}, {
+		.alg = "drbg_nopr_ctr_aes192",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_nopr_ctr_aes192_tv_template,
+				.count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
+			}
+		}
+	}, {
+		.alg = "drbg_nopr_ctr_aes256",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_nopr_ctr_aes256_tv_template,
+				.count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
+			}
+		}
+	}, {
+		/*
+		 * There is no need to specifically test the DRBG with every
+		 * backend cipher -- covered by drbg_nopr_hmac_sha256 test
+		 */
+		.alg = "drbg_nopr_hmac_sha1",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_nopr_hmac_sha256",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_nopr_hmac_sha256_tv_template,
+				.count =
+				ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
+			}
+		}
+	}, {
+		/* covered by drbg_nopr_hmac_sha256 test */
+		.alg = "drbg_nopr_hmac_sha384",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_nopr_hmac_sha512",
+		.test = alg_test_null,
+		.fips_allowed = 1,
+	}, {
+		.alg = "drbg_nopr_sha1",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_nopr_sha256",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_nopr_sha256_tv_template,
+				.count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
+			}
+		}
+	}, {
+		/* covered by drbg_nopr_sha256 test */
+		.alg = "drbg_nopr_sha384",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_nopr_sha512",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_ctr_aes128",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_pr_ctr_aes128_tv_template,
+				.count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
+			}
+		}
+	}, {
+		/* covered by drbg_pr_ctr_aes128 test */
+		.alg = "drbg_pr_ctr_aes192",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_ctr_aes256",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_hmac_sha1",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_hmac_sha256",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_pr_hmac_sha256_tv_template,
+				.count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
+			}
+		}
+	}, {
+		/* covered by drbg_pr_hmac_sha256 test */
+		.alg = "drbg_pr_hmac_sha384",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_hmac_sha512",
+		.test = alg_test_null,
+		.fips_allowed = 1,
+	}, {
+		.alg = "drbg_pr_sha1",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_sha256",
+		.test = alg_test_drbg,
+		.fips_allowed = 1,
+		.suite = {
+			.drbg = {
+				.vecs = drbg_pr_sha256_tv_template,
+				.count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
+			}
+		}
+	}, {
+		/* covered by drbg_pr_sha256 test */
+		.alg = "drbg_pr_sha384",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
+		.alg = "drbg_pr_sha512",
+		.fips_allowed = 1,
+		.test = alg_test_null,
+	}, {
 		.alg = "ecb(__aes-aesni)",
 		.test = alg_test_null,
 		.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 69d0dd8..6597203 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -32,7 +32,7 @@
 #define MAX_DIGEST_SIZE		64
 #define MAX_TAP			8
 
-#define MAX_KEYLEN		56
+#define MAX_KEYLEN		160
 #define MAX_IVLEN		32
 
 struct hash_testvec {
@@ -92,6 +92,21 @@
 	unsigned short loops;
 };
 
+struct drbg_testvec {
+	unsigned char *entropy;
+	size_t entropylen;
+	unsigned char *entpra;
+	unsigned char *entprb;
+	size_t entprlen;
+	unsigned char *addtla;
+	unsigned char *addtlb;
+	size_t addtllen;
+	unsigned char *pers;
+	size_t perslen;
+	unsigned char *expected;
+	size_t expectedlen;
+};
+
 static char zeroed_string[48];
 
 /*
@@ -1807,18 +1822,59 @@
 	},
 };
 
-#define GHASH_TEST_VECTORS 1
+#define GHASH_TEST_VECTORS 5
 
 static struct hash_testvec ghash_tv_template[] =
 {
 	{
-
-		.key	= "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
+		.key	= "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
+			  "\xff\xca\xff\x95\xf8\x30\xf0\x61",
 		.ksize	= 16,
-		.plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+		.plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
+			     "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
 		.psize	= 16,
 		.digest	= "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
 			  "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+	}, {
+		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+		.ksize	= 16,
+		.plaintext = "what do ya want for nothing?",
+		.psize	= 28,
+		.digest	= "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce"
+			  "\x0d\x61\x06\x27\x66\x51\xd5\xe2",
+		.np	= 2,
+		.tap	= {14, 14}
+	}, {
+		.key	= "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+		.ksize	= 16,
+		.plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+			"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+			"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+			"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
+		.psize	= 50,
+		.digest	= "\xfb\x49\x8a\x36\xe1\x96\xe1\x96"
+			  "\xe1\x96\xe1\x96\xe1\x96\xe1\x96",
+	}, {
+		.key	= "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
+			  "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+		.ksize	= 16,
+		.plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+			"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+			"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+			"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+		.psize	= 50,
+		.digest	= "\x2b\x5c\x0c\x7f\x52\xd1\x60\xc2"
+			  "\x49\xed\x6e\x32\x7a\xa9\xbe\x08",
+	}, {
+		.key	= "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
+			  "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+		.ksize	= 16,
+		.plaintext = "Test With Truncation",
+		.psize	= 20,
+		.digest	= "\xf8\x94\x87\x2a\x4b\x63\x99\x28"
+			  "\x23\xf7\x93\xf7\x19\xf5\x96\xd9",
 	},
 };
 
@@ -3097,8 +3153,8 @@
 			  "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A",
 		.rlen	= 248,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 248 - 8, 8 },
+		.np	= 3,
+		.tap	= { 248 - 10, 2, 8 },
 	},
 };
 
@@ -3207,8 +3263,8 @@
 			  "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
 		.rlen	= 248,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 248 - 8, 8 },
+		.np	= 3,
+		.tap	= { 248 - 10, 2, 8 },
 	},
 };
 
@@ -3333,8 +3389,8 @@
 			  "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
 		.rlen	= 248,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 248 - 8, 8 },
+		.np	= 3,
+		.tap	= { 248 - 10, 2, 8 },
 	},
 };
 
@@ -3442,8 +3498,8 @@
 			  "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
 		.rlen	= 248,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 248 - 8, 8 },
+		.np	= 3,
+		.tap	= { 248 - 10, 2, 8 },
 	},
 };
 
@@ -3517,8 +3573,8 @@
 			  "\x69\x74\xA1\x06\x46\x0F\x4E\x75",
 		.rlen	= 248,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 248 - 8, 8 },
+		.np	= 3,
+		.tap	= { 248 - 10, 2, 8 },
 	}, { /* Generated with Crypto++ */
 		.key	= "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
 		.klen	= 8,
@@ -3663,8 +3719,8 @@
 			  "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
 		.rlen	= 248,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 248 - 8, 8 },
+		.np	= 3,
+		.tap	= { 248 - 10, 2, 8 },
 	}, { /* Generated with Crypto++ */
 		.key	= "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
 		.klen	= 8,
@@ -3899,8 +3955,8 @@
 			  "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -4064,8 +4120,8 @@
 			  "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -4244,8 +4300,8 @@
 			  "\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -4424,8 +4480,8 @@
 			  "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -4564,8 +4620,8 @@
 			  "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	}, { /* Generated with Crypto++ */
 		.key	= "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
 			  "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -4842,8 +4898,8 @@
 			  "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	}, { /* Generated with Crypto++ */
 		.key	= "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
 			  "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -5182,8 +5238,8 @@
 			  "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4",
 		.rlen	= 504,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 504 - 8, 8 },
+		.np	= 3,
+		.tap	= { 504 - 10, 2, 8 },
 	},
 };
 
@@ -5374,8 +5430,8 @@
 			  "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
 		.rlen	= 504,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 504 - 8, 8 },
+		.np	= 3,
+		.tap	= { 504 - 10, 2, 8 },
 	},
 };
 
@@ -5531,8 +5587,8 @@
 			  "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
 		.rlen	= 504,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 504 - 8, 8 },
+		.np	= 3,
+		.tap	= { 504 - 10, 2, 8 },
 	},
 };
 
@@ -5688,8 +5744,8 @@
 			  "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
 		.rlen	= 504,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 504 - 8, 8 },
+		.np	= 3,
+		.tap	= { 504 - 10, 2, 8 },
 	},
 };
 
@@ -6694,8 +6750,8 @@
 			  "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -6862,8 +6918,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -7045,8 +7101,8 @@
 			  "\x0A\xA3\x30\x10\x26\x25\x41\x2C",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -7228,8 +7284,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -8302,8 +8358,8 @@
 			  "\x11\xd7\xb8\x6e\xea\xe1\x80\x30",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -8555,8 +8611,8 @@
 			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -8897,8 +8953,8 @@
 			  "\x37\x30\xe1\x91\x8d\xb3\x2a\xff",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -9240,8 +9296,8 @@
 			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -9438,8 +9494,8 @@
 			  "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -9664,8 +9720,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -9846,8 +9902,8 @@
 			  "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -9987,8 +10043,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -11061,8 +11117,8 @@
 			  "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -11314,8 +11370,8 @@
 			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -11656,8 +11712,8 @@
 			  "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -11999,8 +12055,8 @@
 			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -12182,8 +12238,8 @@
 			  "\x11\x74\x93\x57\xB4\x7E\xC6\x00",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -12353,8 +12409,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -12494,8 +12550,8 @@
 			  "\x22\x46\x89\x2D\x0F\x2B\x08\x24",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -12635,8 +12691,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -12792,8 +12848,8 @@
 			  "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -12949,8 +13005,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -13096,8 +13152,8 @@
 			  "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -13243,8 +13299,8 @@
 			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -13392,8 +13448,8 @@
 			  "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -13541,8 +13597,8 @@
 			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -13749,8 +13805,8 @@
 			  "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -13921,8 +13977,8 @@
 			  "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -14140,8 +14196,8 @@
 			  "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -14359,8 +14415,8 @@
 			  "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -16265,8 +16321,8 @@
 			  "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
 		.rlen   = 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	}
 };
 
@@ -16519,8 +16575,8 @@
 			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
 		.rlen   = 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	}
 };
 
@@ -16861,8 +16917,8 @@
 			  "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	}
 };
 
@@ -17203,8 +17259,8 @@
 			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	}
 };
 
@@ -17420,8 +17476,8 @@
 			  "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	}, { /* Generated with Crypto++ */
 		.key	= "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
 			  "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -17775,8 +17831,8 @@
 			  "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	}, { /* Generated with Crypto++ */
 		.key	= "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
 			  "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -20743,6 +20799,834 @@
 	},
 };
 
+/*
+ * SP800-90A DRBG Test vectors from
+ * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
+ *
+ * Test vectors for DRBG with prediction resistance. All types of DRBGs
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
+			"\xc1\xeb\xd2\x4e\x36\x14\xab\x18\xc4\x9c\xc9\xcf"
+			"\x1a\xe8\xf7\x7b\x02\x49\x73\xd7\xf1\x42\x7d\xc6"
+			"\x3f\x29\x2d\xec\xd3\x66\x51\x3f\x1d\x8d\x5b\x4e",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\x38\x9c\x91\xfa\xc2\xa3\x46\x89\x56\x08\x3f\x62"
+			"\x73\xd5\x22\xa9\x29\x63\x3a\x1d\xe5\x5d\x5e\x4f"
+			"\x67\xb0\x67\x7a\x5e\x9e\x0c\x62",
+		.entprb = (unsigned char *)
+			"\xb2\x8f\x36\xb2\xf6\x8d\x39\x13\xfa\x6c\x66\xcf"
+			"\x62\x8a\x7e\x8c\x12\x33\x71\x9c\x69\xe4\xa5\xf0"
+			"\x8c\xee\xeb\x9c\xf5\x31\x98\x31",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\x52\x7b\xa3\xad\x71\x77\xa4\x49\x42\x04\x61\xc7"
+			"\xf0\xaf\xa5\xfd\xd3\xb3\x0d\x6a\x61\xba\x35\x49"
+			"\xbb\xaa\xaf\xe4\x25\x7d\xb5\x48\xaf\x5c\x18\x3d"
+			"\x33\x8d\x9d\x45\xdf\x98\xd5\x94\xa8\xda\x92\xfe"
+			"\xc4\x3c\x94\x2a\xcf\x7f\x7b\xf2\xeb\x28\xa9\xf1"
+			"\xe0\x86\x30\xa8\xfe\xf2\x48\x90\x91\x0c\x75\xb5"
+			"\x3c\x00\xf0\x4d\x09\x4f\x40\xa7\xa2\x8c\x52\xdf"
+			"\x52\xef\x17\xbf\x3d\xd1\xa2\x31\xb4\xb8\xdc\xe6"
+			"\x5b\x0d\x1f\x78\x36\xb4\xe6\x4b\xa7\x11\x25\xd5"
+			"\x94\xc6\x97\x36\xab\xf0\xe5\x31\x28\x6a\xbb\xce"
+			"\x30\x81\xa6\x8f\x27\x14\xf8\x1c",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x5d\xf2\x14\xbc\xf6\xb5\x4e\x0b\xf0\x0d\x6f\x2d"
+			"\xe2\x01\x66\x7b\xd0\xa4\x73\xa4\x21\xdd\xb0\xc0"
+			"\x51\x79\x09\xf4\xea\xa9\x08\xfa\xa6\x67\xe0\xe1"
+			"\xd1\x88\xa8\xad\xee\x69\x74\xb3\x55\x06\x9b\xf6",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\xef\x48\x06\xa2\xc2\x45\xf1\x44\xfa\x34\x2c\xeb"
+			"\x8d\x78\x3c\x09\x8f\x34\x72\x20\xf2\xe7\xfd\x13"
+			"\x76\x0a\xf6\xdc\x3c\xf5\xc0\x15",
+		.entprb = (unsigned char *)
+			"\x4b\xbe\xe5\x24\xed\x6a\x2d\x0c\xdb\x73\x5e\x09"
+			"\xf9\xad\x67\x7c\x51\x47\x8b\x6b\x30\x2a\xc6\xde"
+			"\x76\xaa\x55\x04\x8b\x0a\x72\x95",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\x3b\x14\x71\x99\xa1\xda\xa0\x42\xe6\xc8\x85\x32"
+			"\x70\x20\x32\x53\x9a\xbe\xd1\x1e\x15\xef\xfb\x4c"
+			"\x25\x6e\x19\x3a\xf0\xb9\xcb\xde\xf0\x3b\xc6\x18"
+			"\x4d\x85\x5a\x9b\xf1\xe3\xc2\x23\x03\x93\x08\xdb"
+			"\xa7\x07\x4b\x33\x78\x40\x4d\xeb\x24\xf5\x6e\x81"
+			"\x4a\x1b\x6e\xa3\x94\x52\x43\xb0\xaf\x2e\x21\xf4"
+			"\x42\x46\x8e\x90\xed\x34\x21\x75\xea\xda\x67\xb6"
+			"\xe4\xf6\xff\xc6\x31\x6c\x9a\x5a\xdb\xb3\x97\x13"
+			"\x09\xd3\x20\x98\x33\x2d\x6d\xd7\xb5\x6a\xa8\xa9"
+			"\x9a\x5b\xd6\x87\x52\xa1\x89\x2b\x4b\x9c\x64\x60"
+			"\x50\x47\xa3\x63\x81\x16\xaf\x19",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\xbe\x13\xdb\x2a\xe9\xa8\xfe\x09\x97\xe1\xce\x5d"
+			"\xe8\xbb\xc0\x7c\x4f\xcb\x62\x19\x3f\x0f\xd2\xad"
+			"\xa9\xd0\x1d\x59\x02\xc4\xff\x70",
+		.addtlb = (unsigned char *)
+			"\x6f\x96\x13\xe2\xa7\xf5\x6c\xfe\xdf\x66\xe3\x31"
+			"\x63\x76\xbf\x20\x27\x06\x49\xf1\xf3\x01\x77\x41"
+			"\x9f\xeb\xe4\x38\xfe\x67\x00\xcd",
+		.addtllen = 32,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\xc6\x1c\xaf\x83\xa2\x56\x38\xf9\xb0\xbc\xd9\x85"
+			"\xf5\x2e\xc4\x46\x9c\xe1\xb9\x40\x98\x70\x10\x72"
+			"\xd7\x7d\x15\x85\xa1\x83\x5a\x97\xdf\xc8\xa8\xe8"
+			"\x03\x4c\xcb\x70\x35\x8b\x90\x94\x46\x8a\x6e\xa1",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\xc9\x05\xa4\xcf\x28\x80\x4b\x93\x0f\x8b\xc6\xf9"
+			"\x09\x41\x58\x74\xe9\xec\x28\xc7\x53\x0a\x73\x60"
+			"\xba\x0a\xde\x57\x5b\x4b\x9f\x29",
+		.entprb = (unsigned char *)
+			"\x4f\x31\xd2\xeb\xac\xfa\xa8\xe2\x01\x7d\xf3\xbd"
+			"\x42\xbd\x20\xa0\x30\x65\x74\xd5\x5d\xd2\xad\xa4"
+			"\xa9\xeb\x1f\x4d\xf6\xfd\xb8\x26",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\xf6\x13\x05\xcb\x83\x60\x16\x42\x49\x1d\xc6\x25"
+			"\x3b\x8c\x31\xa3\xbe\x8b\xbd\x1c\xe2\xec\x1d\xde"
+			"\xbb\xbf\xa1\xac\xa8\x9f\x50\xce\x69\xce\xef\xd5"
+			"\xd6\xf2\xef\x6a\xf7\x81\x38\xdf\xbc\xa7\x5a\xb9"
+			"\xb2\x42\x65\xab\xe4\x86\x8d\x2d\x9d\x59\x99\x2c"
+			"\x5a\x0d\x71\x55\x98\xa4\x45\xc2\x8d\xdb\x05\x5e"
+			"\x50\x21\xf7\xcd\xe8\x98\x43\xce\x57\x74\x63\x4c"
+			"\xf3\xb1\xa5\x14\x1e\x9e\x01\xeb\x54\xd9\x56\xae"
+			"\xbd\xb6\x6f\x1a\x47\x6b\x3b\x44\xe4\xa2\xe9\x3c"
+			"\x6c\x83\x12\x30\xb8\x78\x7f\x8e\x54\x82\xd4\xfe"
+			"\x90\x35\x0d\x4c\x4d\x85\xe7\x13",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = (unsigned char *)
+			"\xa5\xbf\xac\x4f\x71\xa1\xbb\x67\x94\xc6\x50\xc7"
+			"\x2a\x45\x9e\x10\xa8\xed\xf7\x52\x4f\xfe\x21\x90"
+			"\xa4\x1b\xe1\xe2\x53\xcc\x61\x47",
+		.perslen = 32,
+	}, {
+		.entropy = (unsigned char *)
+			"\xb6\xc1\x8d\xdf\x99\x54\xbe\x95\x10\x48\xd9\xf6"
+			"\xd7\x48\xa8\x73\x2d\x74\xde\x1e\xde\x57\x7e\xf4"
+			"\x7b\x7b\x64\xef\x88\x7a\xa8\x10\x4b\xe1\xc1\x87"
+			"\xbb\x0b\xe1\x39\x39\x50\xaf\x68\x9c\xa2\xbf\x5e",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\xdc\x81\x0a\x01\x58\xa7\x2e\xce\xee\x48\x8c\x7c"
+			"\x77\x9e\x3c\xf1\x17\x24\x7a\xbb\xab\x9f\xca\x12"
+			"\x19\xaf\x97\x2d\x5f\xf9\xff\xfc",
+		.entprb = (unsigned char *)
+			"\xaf\xfc\x4f\x98\x8b\x93\x95\xc1\xb5\x8b\x7f\x73"
+			"\x6d\xa6\xbe\x6d\x33\xeb\x2c\x82\xb1\xaf\xc1\xb6"
+			"\xb6\x05\xe2\x44\xaa\xfd\xe7\xdb",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\x51\x79\xde\x1c\x0f\x58\xf3\xf4\xc9\x57\x2e\x31"
+			"\xa7\x09\xa1\x53\x64\x63\xa2\xc5\x1d\x84\x88\x65"
+			"\x01\x1b\xc6\x16\x3c\x49\x5b\x42\x8e\x53\xf5\x18"
+			"\xad\x94\x12\x0d\x4f\x55\xcc\x45\x5c\x98\x0f\x42"
+			"\x28\x2f\x47\x11\xf9\xc4\x01\x97\x6b\xa0\x94\x50"
+			"\xa9\xd1\x5e\x06\x54\x3f\xdf\xbb\xc4\x98\xee\x8b"
+			"\xba\xa9\xfa\x49\xee\x1d\xdc\xfb\x50\xf6\x51\x9f"
+			"\x6c\x4a\x9a\x6f\x63\xa2\x7d\xad\xaf\x3a\x24\xa0"
+			"\xd9\x9f\x07\xeb\x15\xee\x26\xe0\xd5\x63\x39\xda"
+			"\x3c\x59\xd6\x33\x6c\x02\xe8\x05\x71\x46\x68\x44"
+			"\x63\x4a\x68\x72\xe9\xf5\x55\xfe",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\x15\x20\x2f\xf6\x98\x28\x63\xa2\xc4\x4e\xbb\x6c"
+			"\xb2\x25\x92\x61\x79\xc9\x22\xc4\x61\x54\x96\xff"
+			"\x4a\x85\xca\x80\xfe\x0d\x1c\xd0",
+		.addtlb = (unsigned char *)
+			"\xde\x29\x8e\x03\x42\x61\xa3\x28\x5e\xc8\x80\xc2"
+			"\x6d\xbf\xad\x13\xe1\x8d\x2a\xc7\xe8\xc7\x18\x89"
+			"\x42\x58\x9e\xd6\xcc\xad\x7b\x1e",
+		.addtllen = 32,
+		.pers = (unsigned char *)
+			"\x84\xc3\x73\x9e\xce\xb3\xbc\x89\xf7\x62\xb3\xe1"
+			"\xd7\x48\x45\x8a\xa9\xcc\xe9\xed\xd5\x81\x84\x52"
+			"\x82\x4c\xdc\x19\xb8\xf8\x92\x5c",
+		.perslen = 32,
+	},
+};
+
+static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
+			"\x7e\x5c\x0e\xae\x0d\x3e\x30\x95\x59\xe9\xfe\x96"
+			"\xb0\x67\x6d\x49\xd5\x91\xea\x4d\x07\xd2\x0d\x46"
+			"\xd0\x64\x75\x7d\x30\x23\xca\xc2\x37\x61\x27\xab",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\xc6\x0f\x29\x99\x10\x0f\x73\x8c\x10\xf7\x47\x92"
+			"\x67\x6a\x3f\xc4\xa2\x62\xd1\x37\x21\x79\x80\x46"
+			"\xe2\x9a\x29\x51\x81\x56\x9f\x54",
+		.entprb = (unsigned char *)
+			"\xc1\x1d\x45\x24\xc9\x07\x1b\xd3\x09\x60\x15\xfc"
+			"\xf7\xbc\x24\xa6\x07\xf2\x2f\xa0\x65\xc9\x37\x65"
+			"\x8a\x2a\x77\xa8\x69\x90\x89\xf4",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\xab\xc0\x15\x85\x60\x94\x80\x3a\x93\x8d\xff\xd2"
+			"\x0d\xa9\x48\x43\x87\x0e\xf9\x35\xb8\x2c\xfe\xc1"
+			"\x77\x06\xb8\xf5\x51\xb8\x38\x50\x44\x23\x5d\xd4"
+			"\x4b\x59\x9f\x94\xb3\x9b\xe7\x8d\xd4\x76\xe0\xcf"
+			"\x11\x30\x9c\x99\x5a\x73\x34\xe0\xa7\x8b\x37\xbc"
+			"\x95\x86\x23\x50\x86\xfa\x3b\x63\x7b\xa9\x1c\xf8"
+			"\xfb\x65\xef\xa2\x2a\x58\x9c\x13\x75\x31\xaa\x7b"
+			"\x2d\x4e\x26\x07\xaa\xc2\x72\x92\xb0\x1c\x69\x8e"
+			"\x6e\x01\xae\x67\x9e\xb8\x7c\x01\xa8\x9c\x74\x22"
+			"\xd4\x37\x2d\x6d\x75\x4a\xba\xbb\x4b\xf8\x96\xfc"
+			"\xb1\xcd\x09\xd6\x92\xd0\x28\x3f",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\xb9\x1f\xe9\xef\xdd\x9b\x7d\x20\xb6\xec\xe0\x2f"
+			"\xdb\x76\x24\xce\x41\xc8\x3a\x4a\x12\x7f\x3e\x2f"
+			"\xae\x05\x99\xea\xb5\x06\x71\x0d\x0c\x4c\xb4\x05"
+			"\x26\xc6\xbd\xf5\x7f\x2a\x3d\xf2\xb5\x49\x7b\xda",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\xef\x67\x50\x9c\xa7\x7d\xdf\xb7\x2d\x81\x01\xa4"
+			"\x62\x81\x6a\x69\x5b\xb3\x37\x45\xa7\x34\x8e\x26"
+			"\x46\xd9\x26\xa2\x19\xd4\x94\x43",
+		.entprb = (unsigned char *)
+			"\x97\x75\x53\x53\xba\xb4\xa6\xb2\x91\x60\x71\x79"
+			"\xd1\x6b\x4a\x24\x9a\x34\x66\xcc\x33\xab\x07\x98"
+			"\x51\x78\x72\xb2\x79\xfd\x2c\xff",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\x9c\xdc\x63\x8a\x19\x23\x22\x66\x0c\xc5\xb9\xd7"
+			"\xfb\x2a\xb0\x31\xe3\x8a\x36\xa8\x5a\xa8\x14\xda"
+			"\x1e\xa9\xcc\xfe\xb8\x26\x44\x83\x9f\xf6\xff\xaa"
+			"\xc8\x98\xb8\x30\x35\x3b\x3d\x36\xd2\x49\xd4\x40"
+			"\x62\x0a\x65\x10\x76\x55\xef\xc0\x95\x9c\xa7\xda"
+			"\x3f\xcf\xb7\x7b\xc6\xe1\x28\x52\xfc\x0c\xe2\x37"
+			"\x0d\x83\xa7\x51\x4b\x31\x47\x3c\xe1\x3c\xae\x70"
+			"\x01\xc8\xa3\xd3\xc2\xac\x77\x9c\xd1\x68\x77\x9b"
+			"\x58\x27\x3b\xa5\x0f\xc2\x7a\x8b\x04\x65\x62\xd5"
+			"\xe8\xd6\xfe\x2a\xaf\xd3\xd3\xfe\xbd\x18\xfb\xcd"
+			"\xcd\x66\xb5\x01\x69\x66\xa0\x3c",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\x17\xc1\x56\xcb\xcc\x50\xd6\x03\x7d\x45\x76\xa3"
+			"\x75\x76\xc1\x4a\x66\x1b\x2e\xdf\xb0\x2e\x7d\x56"
+			"\x6d\x99\x3b\xc6\x58\xda\x03\xf6",
+		.addtlb = (unsigned char *)
+			"\x7c\x7b\x4a\x4b\x32\x5e\x6f\x67\x34\xf5\x21\x4c"
+			"\xf9\x96\xf9\xbf\x1c\x8c\x81\xd3\x9b\x60\x6a\x44"
+			"\xc6\x03\xa2\xfb\x13\x20\x19\xb7",
+		.addtllen = 32,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x13\x54\x96\xfc\x1b\x7d\x28\xf3\x18\xc9\xa7\x89"
+			"\xb6\xb3\xc8\x72\xac\x00\xd4\x59\x36\x25\x05\xaf"
+			"\xa5\xdb\x96\xcb\x3c\x58\x46\x87\xa5\xaa\xbf\x20"
+			"\x3b\xfe\x23\x0e\xd1\xc7\x41\x0f\x3f\xc9\xb3\x67",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\xe2\xbd\xb7\x48\x08\x06\xf3\xe1\x93\x3c\xac\x79"
+			"\xa7\x2b\x11\xda\xe3\x2e\xe1\x91\xa5\x02\x19\x57"
+			"\x20\x28\xad\xf2\x60\xd7\xcd\x45",
+		.entprb = (unsigned char *)
+			"\x8b\xd4\x69\xfc\xff\x59\x95\x95\xc6\x51\xde\x71"
+			"\x68\x5f\xfc\xf9\x4a\xab\xec\x5a\xcb\xbe\xd3\x66"
+			"\x1f\xfa\x74\xd3\xac\xa6\x74\x60",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\x1f\x9e\xaf\xe4\xd2\x46\xb7\x47\x41\x4c\x65\x99"
+			"\x01\xe9\x3b\xbb\x83\x0c\x0a\xb0\xc1\x3a\xe2\xb3"
+			"\x31\x4e\xeb\x93\x73\xee\x0b\x26\xc2\x63\xa5\x75"
+			"\x45\x99\xd4\x5c\x9f\xa1\xd4\x45\x87\x6b\x20\x61"
+			"\x40\xea\x78\xa5\x32\xdf\x9e\x66\x17\xaf\xb1\x88"
+			"\x9e\x2e\x23\xdd\xc1\xda\x13\x97\x88\xa5\xb6\x5e"
+			"\x90\x14\x4e\xef\x13\xab\x5c\xd9\x2c\x97\x9e\x7c"
+			"\xd7\xf8\xce\xea\x81\xf5\xcd\x71\x15\x49\x44\xce"
+			"\x83\xb6\x05\xfb\x7d\x30\xb5\x57\x2c\x31\x4f\xfc"
+			"\xfe\x80\xb6\xc0\x13\x0c\x5b\x9b\x2e\x8f\x3d\xfc"
+			"\xc2\xa3\x0c\x11\x1b\x80\x5f\xf3",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = (unsigned char *)
+			"\x64\xb6\xfc\x60\xbc\x61\x76\x23\x6d\x3f\x4a\x0f"
+			"\xe1\xb4\xd5\x20\x9e\x70\xdd\x03\x53\x6d\xbf\xce"
+			"\xcd\x56\x80\xbc\xb8\x15\xc8\xaa",
+		.perslen = 32,
+	}, {
+		.entropy = (unsigned char *)
+			"\xc7\xcc\xbc\x67\x7e\x21\x66\x1e\x27\x2b\x63\xdd"
+			"\x3a\x78\xdc\xdf\x66\x6d\x3f\x24\xae\xcf\x37\x01"
+			"\xa9\x0d\x89\x8a\xa7\xdc\x81\x58\xae\xb2\x10\x15"
+			"\x7e\x18\x44\x6d\x13\xea\xdf\x37\x85\xfe\x81\xfb",
+		.entropylen = 48,
+		.entpra = (unsigned char *)
+			"\x7b\xa1\x91\x5b\x3c\x04\xc4\x1b\x1d\x19\x2f\x1a"
+			"\x18\x81\x60\x3c\x6c\x62\x91\xb7\xe9\xf5\xcb\x96"
+			"\xbb\x81\x6a\xcc\xb5\xae\x55\xb6",
+		.entprb = (unsigned char *)
+			"\x99\x2c\xc7\x78\x7e\x3b\x88\x12\xef\xbe\xd3\xd2"
+			"\x7d\x2a\xa5\x86\xda\x8d\x58\x73\x4a\x0a\xb2\x2e"
+			"\xbb\x4c\x7e\xe3\x9a\xb6\x81\xc1",
+		.entprlen = 32,
+		.expected = (unsigned char *)
+			"\x95\x6f\x95\xfc\x3b\xb7\xfe\x3e\xd0\x4e\x1a\x14"
+			"\x6c\x34\x7f\x7b\x1d\x0d\x63\x5e\x48\x9c\x69\xe6"
+			"\x46\x07\xd2\x87\xf3\x86\x52\x3d\x98\x27\x5e\xd7"
+			"\x54\xe7\x75\x50\x4f\xfb\x4d\xfd\xac\x2f\x4b\x77"
+			"\xcf\x9e\x8e\xcc\x16\xa2\x24\xcd\x53\xde\x3e\xc5"
+			"\x55\x5d\xd5\x26\x3f\x89\xdf\xca\x8b\x4e\x1e\xb6"
+			"\x88\x78\x63\x5c\xa2\x63\x98\x4e\x6f\x25\x59\xb1"
+			"\x5f\x2b\x23\xb0\x4b\xa5\x18\x5d\xc2\x15\x74\x40"
+			"\x59\x4c\xb4\x1e\xcf\x9a\x36\xfd\x43\xe2\x03\xb8"
+			"\x59\x91\x30\x89\x2a\xc8\x5a\x43\x23\x7c\x73\x72"
+			"\xda\x3f\xad\x2b\xba\x00\x6b\xd1",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\x18\xe8\x17\xff\xef\x39\xc7\x41\x5c\x73\x03\x03"
+			"\xf6\x3d\xe8\x5f\xc8\xab\xe4\xab\x0f\xad\xe8\xd6"
+			"\x86\x88\x55\x28\xc1\x69\xdd\x76",
+		.addtlb = (unsigned char *)
+			"\xac\x07\xfc\xbe\x87\x0e\xd3\xea\x1f\x7e\xb8\xe7"
+			"\x9d\xec\xe8\xe7\xbc\xf3\x18\x25\x77\x35\x4a\xaa"
+			"\x00\x99\x2a\xdd\x0a\x00\x50\x82",
+		.addtllen = 32,
+		.pers = (unsigned char *)
+			"\xbc\x55\xab\x3c\xf6\x52\xb0\x11\x3d\x7b\x90\xb8"
+			"\x24\xc9\x26\x4e\x5a\x1e\x77\x0d\x3d\x58\x4a\xda"
+			"\xd1\x81\xe9\xf8\xeb\x30\x8f\x6f",
+		.perslen = 32,
+	},
+};
+
+static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
+			"\x94\xd7\x28\x9c\x43\x77\x19\x29\x1a\x6d\xc3\xa2",
+		.entropylen = 24,
+		.entpra = (unsigned char *)
+			"\x96\xd8\x9e\x45\x32\xc9\xd2\x08\x7a\x6d\x97\x15"
+			"\xb4\xec\x80\xb1",
+		.entprb = (unsigned char *)
+			"\x8b\xb6\x72\xb5\x24\x0b\x98\x65\x95\x95\xe9\xc9"
+			"\x28\x07\xeb\xc2",
+		.entprlen = 16,
+		.expected = (unsigned char *)
+			"\x70\x19\xd0\x4c\x45\x78\xd6\x68\xa9\x9a\xaa\xfe"
+			"\xc1\xdf\x27\x9a\x1c\x0d\x0d\xf7\x24\x75\x46\xcc"
+			"\x77\x6b\xdf\x89\xc6\x94\xdc\x74\x50\x10\x70\x18"
+			"\x9b\xdc\x96\xb4\x89\x23\x40\x1a\xce\x09\x87\xce"
+			"\xd2\xf3\xd5\xe4\x51\x67\x74\x11\x5a\xcc\x8b\x3b"
+			"\x8a\xf1\x23\xa8",
+		.expectedlen = 64,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x8e\x83\xe0\xeb\x37\xea\x3e\x53\x5e\x17\x6e\x77"
+			"\xbd\xb1\x53\x90\xfc\xdc\xc1\x3c\x9a\x88\x22\x94",
+		.entropylen = 24,
+		.entpra = (unsigned char *)
+			"\x6a\x85\xe7\x37\xc8\xf1\x04\x31\x98\x4f\xc8\x73"
+			"\x67\xd1\x08\xf8",
+		.entprb = (unsigned char *)
+			"\xd7\xa4\x68\xe2\x12\x74\xc3\xd9\xf1\xb7\x05\xbc"
+			"\xd4\xba\x04\x58",
+		.entprlen = 16,
+		.expected = (unsigned char *)
+			"\x78\xd6\xa6\x70\xff\xd1\x82\xf5\xa2\x88\x7f\x6d"
+			"\x3d\x8c\x39\xb1\xa8\xcb\x2c\x91\xab\x14\x7e\xbc"
+			"\x95\x45\x9f\x24\xb8\x20\xac\x21\x23\xdb\x72\xd7"
+			"\x12\x8d\x48\x95\xf3\x19\x0c\x43\xc6\x19\x45\xfc"
+			"\x8b\xac\x40\x29\x73\x00\x03\x45\x5e\x12\xff\x0c"
+			"\xc1\x02\x41\x82",
+		.expectedlen = 64,
+		.addtla = (unsigned char *)
+			"\xa2\xd9\x38\xcf\x8b\x29\x67\x5b\x65\x62\x6f\xe8"
+			"\xeb\xb3\x01\x76",
+		.addtlb = (unsigned char *)
+			"\x59\x63\x1e\x81\x8a\x14\xa8\xbb\xa1\xb8\x41\x25"
+			"\xd0\x7f\xcc\x43",
+		.addtllen = 16,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x04\xd9\x49\xa6\xdc\xe8\x6e\xbb\xf1\x08\x77\x2b"
+			"\x9e\x08\xca\x92\x65\x16\xda\x99\xa2\x59\xf3\xe8",
+		.entropylen = 24,
+		.entpra = (unsigned char *)
+			"\x38\x7e\x3f\x6b\x51\x70\x7b\x20\xec\x53\xd0\x66"
+			"\xc3\x0f\xe3\xb0",
+		.entprb = (unsigned char *)
+			"\xe0\x86\xa6\xaa\x5f\x72\x2f\xad\xf7\xef\x06\xb8"
+			"\xd6\x9c\x9d\xe8",
+		.entprlen = 16,
+		.expected = (unsigned char *)
+			"\xc9\x0a\xaf\x85\x89\x71\x44\x66\x4f\x25\x0b\x2b"
+			"\xde\xd8\xfa\xff\x52\x5a\x1b\x32\x5e\x41\x7a\x10"
+			"\x1f\xef\x1e\x62\x23\xe9\x20\x30\xc9\x0d\xad\x69"
+			"\xb4\x9c\x5b\xf4\x87\x42\xd5\xae\x5e\x5e\x43\xcc"
+			"\xd9\xfd\x0b\x93\x4a\xe3\xd4\x06\x37\x36\x0f\x3f"
+			"\x72\x82\x0c\xcf",
+		.expectedlen = 64,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = (unsigned char *)
+			"\xbf\xa4\x9a\x8f\x7b\xd8\xb1\x7a\x9d\xfa\x45\xed"
+			"\x21\x52\xb3\xad",
+		.perslen = 16,
+	}, {
+		.entropy = (unsigned char *)
+			"\x92\x89\x8f\x31\xfa\x1c\xff\x6d\x18\x2f\x26\x06"
+			"\x43\xdf\xf8\x18\xc2\xa4\xd9\x72\xc3\xb9\xb6\x97",
+		.entropylen = 24,
+		.entpra = (unsigned char *)
+			"\x20\x72\x8a\x06\xf8\x6f\x8d\xd4\x41\xe2\x72\xb7"
+			"\xc4\x2c\xe8\x10",
+		.entprb = (unsigned char *)
+			"\x3d\xb0\xf0\x94\xf3\x05\x50\x33\x17\x86\x3e\x22"
+			"\x08\xf7\xa5\x01",
+		.entprlen = 16,
+		.expected = (unsigned char *)
+			"\x5a\x35\x39\x87\x0f\x4d\x22\xa4\x09\x24\xee\x71"
+			"\xc9\x6f\xac\x72\x0a\xd6\xf0\x88\x82\xd0\x83\x28"
+			"\x73\xec\x3f\x93\xd8\xab\x45\x23\xf0\x7e\xac\x45"
+			"\x14\x5e\x93\x9f\xb1\xd6\x76\x43\x3d\xb6\xe8\x08"
+			"\x88\xf6\xda\x89\x08\x77\x42\xfe\x1a\xf4\x3f\xc4"
+			"\x23\xc5\x1f\x68",
+		.expectedlen = 64,
+		.addtla = (unsigned char *)
+			"\x1a\x40\xfa\xe3\xcc\x6c\x7c\xa0\xf8\xda\xba\x59"
+			"\x23\x6d\xad\x1d",
+		.addtlb = (unsigned char *)
+			"\x9f\x72\x76\x6c\xc7\x46\xe5\xed\x2e\x53\x20\x12"
+			"\xbc\x59\x31\x8c",
+		.addtllen = 16,
+		.pers = (unsigned char *)
+			"\xea\x65\xee\x60\x26\x4e\x7e\xb6\x0e\x82\x68\xc4"
+			"\x37\x3c\x5c\x0b",
+		.perslen = 16,
+	},
+};
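
The fields in these entries map directly onto the kernel's RNG interface: instantiate the DRBG, seed it from .entropy (plus .pers when present), request .expectedlen bytes, and compare against .expected. Below is a minimal sketch of that flow through the public crypto_rng API. It is not the actual crypto/testmgr driver loop, which injects the .entpra/.entprb reseed entropy and the .addtla/.addtlb strings through test-specific hooks; the helper name check_drbg_vector() and the fixed 128-byte buffer are assumptions for illustration.

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/string.h>

static int check_drbg_vector(const struct drbg_testvec *tv,
			     const char *alg /* e.g. "drbg_nopr_hmac_sha256" */)
{
	struct crypto_rng *rng;
	unsigned char buf[128];	/* .expectedlen is at most 128 here */
	int ret;

	rng = crypto_alloc_rng(alg, 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Seed the DRBG with the vector's entropy input */
	ret = crypto_rng_reset(rng, tv->entropy, tv->entropylen);
	if (ret)
		goto out;

	/* Generate output and compare against the expected bytes */
	ret = crypto_rng_get_bytes(rng, buf, tv->expectedlen);
	if (ret < 0)
		goto out;

	ret = memcmp(buf, tv->expected, tv->expectedlen) ? -EINVAL : 0;
out:
	crypto_free_rng(rng);
	return ret;
}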
+
+/*
+ * SP800-90A DRBG Test vectors from
+ * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
+ *
+ * Test vectors for DRBG without prediction resistance. All types of DRBGs
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
+			"\xa2\xe7\x1f\x42\xc7\x12\x9d\x62\x0f\xf5\xc1\x19"
+			"\xa9\xef\x55\xf0\x51\x85\xe0\xfb\x85\x81\xf9\x31"
+			"\x75\x17\x27\x6e\x06\xe9\x60\x7d\xdb\xcb\xcc\x2e",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\xd3\xe1\x60\xc3\x5b\x99\xf3\x40\xb2\x62\x82\x64"
+			"\xd1\x75\x10\x60\xe0\x04\x5d\xa3\x83\xff\x57\xa5"
+			"\x7d\x73\xa6\x73\xd2\xb8\xd8\x0d\xaa\xf6\xa6\xc3"
+			"\x5a\x91\xbb\x45\x79\xd7\x3f\xd0\xc8\xfe\xd1\x11"
+			"\xb0\x39\x13\x06\x82\x8a\xdf\xed\x52\x8f\x01\x81"
+			"\x21\xb3\xfe\xbd\xc3\x43\xe7\x97\xb8\x7d\xbb\x63"
+			"\xdb\x13\x33\xde\xd9\xd1\xec\xe1\x77\xcf\xa6\xb7"
+			"\x1f\xe8\xab\x1d\xa4\x66\x24\xed\x64\x15\xe5\x1c"
+			"\xcd\xe2\xc7\xca\x86\xe2\x83\x99\x0e\xea\xeb\x91"
+			"\x12\x04\x15\x52\x8b\x22\x95\x91\x02\x81\xb0\x2d"
+			"\xd4\x31\xf4\xc9\xf7\x04\x27\xdf",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x73\xd3\xfb\xa3\x94\x5f\x2b\x5f\xb9\x8f\xf6\x9c"
+			"\x8a\x93\x17\xae\x19\xc3\x4c\xc3\xd6\xca\xa3\x2d"
+			"\x16\xfc\x42\xd2\x2d\xd5\x6f\x56\xcc\x1d\x30\xff"
+			"\x9e\x06\x3e\x09\xce\x58\xe6\x9a\x35\xb3\xa6\x56",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\x71\x7b\x93\x46\x1a\x40\xaa\x35\xa4\xaa\xc5\xe7"
+			"\x6d\x5b\x5b\x8a\xa0\xdf\x39\x7d\xae\x71\x58\x5b"
+			"\x3c\x7c\xb4\xf0\x89\xfa\x4a\x8c\xa9\x5c\x54\xc0"
+			"\x40\xdf\xbc\xce\x26\x81\x34\xf8\xba\x7d\x1c\xe8"
+			"\xad\x21\xe0\x74\xcf\x48\x84\x30\x1f\xa1\xd5\x4f"
+			"\x81\x42\x2f\xf4\xdb\x0b\x23\xf8\x73\x27\xb8\x1d"
+			"\x42\xf8\x44\x58\xd8\x5b\x29\x27\x0a\xf8\x69\x59"
+			"\xb5\x78\x44\xeb\x9e\xe0\x68\x6f\x42\x9a\xb0\x5b"
+			"\xe0\x4e\xcb\x6a\xaa\xe2\xd2\xd5\x33\x25\x3e\xe0"
+			"\x6c\xc7\x6a\x07\xa5\x03\x83\x9f\xe2\x8b\xd1\x1c"
+			"\x70\xa8\x07\x59\x97\xeb\xf6\xbe",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\xf4\xd5\x98\x3d\xa8\xfc\xfa\x37\xb7\x54\x67\x73"
+			"\xc7\xc3\xdd\x47\x34\x71\x02\x5d\xc1\xa0\xd3\x10"
+			"\xc1\x8b\xbd\xf5\x66\x34\x6f\xdd",
+		.addtlb = (unsigned char *)
+			"\xf7\x9e\x6a\x56\x0e\x73\xe9\xd9\x7a\xd1\x69\xe0"
+			"\x6f\x8c\x55\x1c\x44\xd1\xce\x6f\x28\xcc\xa4\x4d"
+			"\xa8\xc0\x85\xd1\x5a\x0c\x59\x40",
+		.addtllen = 32,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x2a\x85\xa9\x8b\xd0\xda\x83\xd6\xad\xab\x9f\xbb"
+			"\x54\x31\x15\x95\x1c\x4d\x49\x9f\x6a\x15\xf6\xe4"
+			"\x15\x50\x88\x06\x29\x0d\xed\x8d\xb9\x6f\x96\xe1"
+			"\x83\x9f\xf7\x88\xda\x84\xbf\x44\x28\xd9\x1d\xaa",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\x2d\x55\xde\xc9\xed\x05\x47\x07\x3d\x04\xfc\x28"
+			"\x0f\x92\xf0\x4d\xd8\x00\x32\x47\x0a\x1b\x1c\x4b"
+			"\xef\xd9\x97\xa1\x17\x67\xda\x26\x6c\xfe\x76\x46"
+			"\x6f\xbc\x6d\x82\x4e\x83\x8a\x98\x66\x6c\x01\xb6"
+			"\xe6\x64\xe0\x08\x10\x6f\xd3\x5d\x90\xe7\x0d\x72"
+			"\xa6\xa7\xe3\xbb\x98\x11\x12\x56\x23\xc2\x6d\xd1"
+			"\xc8\xa8\x7a\x39\xf3\x34\xe3\xb8\xf8\x66\x00\x77"
+			"\x7d\xcf\x3c\x3e\xfa\xc9\x0f\xaf\xe0\x24\xfa\xe9"
+			"\x84\xf9\x6a\x01\xf6\x35\xdb\x5c\xab\x2a\xef\x4e"
+			"\xac\xab\x55\xb8\x9b\xef\x98\x68\xaf\x51\xd8\x16"
+			"\xa5\x5e\xae\xf9\x1e\xd2\xdb\xe6",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = (unsigned char *)
+			"\xa8\x80\xec\x98\x30\x98\x15\xd2\xc6\xc4\x68\xf1"
+			"\x3a\x1c\xbf\xce\x6a\x40\x14\xeb\x36\x99\x53\xda"
+			"\x57\x6b\xce\xa4\x1c\x66\x3d\xbc",
+		.perslen = 32,
+	}, {
+		.entropy = (unsigned char *)
+			"\x69\xed\x82\xa9\xc5\x7b\xbf\xe5\x1d\x2f\xcb\x7a"
+			"\xd3\x50\x7d\x96\xb4\xb9\x2b\x50\x77\x51\x27\x74"
+			"\x33\x74\xba\xf1\x30\xdf\x8e\xdf\x87\x1d\x87\xbc"
+			"\x96\xb2\xc3\xa7\xed\x60\x5e\x61\x4e\x51\x29\x1a",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\xa5\x71\x24\x31\x11\xfe\x13\xe1\xa8\x24\x12\xfb"
+			"\x37\xa1\x27\xa5\xab\x77\xa1\x9f\xae\x8f\xaf\x13"
+			"\x93\xf7\x53\x85\x91\xb6\x1b\xab\xd4\x6b\xea\xb6"
+			"\xef\xda\x4c\x90\x6e\xef\x5f\xde\xe1\xc7\x10\x36"
+			"\xd5\x67\xbd\x14\xb6\x89\x21\x0c\xc9\x92\x65\x64"
+			"\xd0\xf3\x23\xe0\x7f\xd1\xe8\x75\xc2\x85\x06\xea"
+			"\xca\xc0\xcb\x79\x2d\x29\x82\xfc\xaa\x9a\xc6\x95"
+			"\x7e\xdc\x88\x65\xba\xec\x0e\x16\x87\xec\xa3\x9e"
+			"\xd8\x8c\x80\xab\x3a\x64\xe0\xcb\x0e\x45\x98\xdd"
+			"\x7c\x6c\x6c\x26\x11\x13\xc8\xce\xa9\x47\xa6\x06"
+			"\x57\xa2\x66\xbb\x2d\x7f\xf3\xc1",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\x74\xd3\x6d\xda\xe8\xd6\x86\x5f\x63\x01\xfd\xf2"
+			"\x7d\x06\x29\x6d\x94\xd1\x66\xf0\xd2\x72\x67\x4e"
+			"\x77\xc5\x3d\x9e\x03\xe3\xa5\x78",
+		.addtlb = (unsigned char *)
+			"\xf6\xb6\x3d\xf0\x7c\x26\x04\xc5\x8b\xcd\x3e\x6a"
+			"\x9f\x9c\x3a\x2e\xdb\x47\x87\xe5\x8e\x00\x5e\x2b"
+			"\x74\x7f\xa6\xf6\x80\xcd\x9b\x21",
+		.addtllen = 32,
+		.pers = (unsigned char *)
+			"\x74\xa6\xe0\x08\xf9\x27\xee\x1d\x6e\x3c\x28\x20"
+			"\x87\xdd\xd7\x54\x31\x47\x78\x4b\xe5\x6d\xa3\x73"
+			"\xa9\x65\xb1\x10\xc1\xdc\x77\x7c",
+		.perslen = 32,
+	},
+};
+
+static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
+			"\xbd\xc4\x6e\x68\x31\xe4\x4d\x34\xa4\xfb\x93\x5e"
+			"\xe2\x85\xdd\x14\xb7\x1a\x74\x88\x65\x9b\xa9\x6c"
+			"\x60\x1d\xc6\x9f\xc9\x02\x94\x08\x05\xec\x0c\xa8",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\xe5\x28\xe9\xab\xf2\xde\xce\x54\xd4\x7c\x7e\x75"
+			"\xe5\xfe\x30\x21\x49\xf8\x17\xea\x9f\xb4\xbe\xe6"
+			"\xf4\x19\x96\x97\xd0\x4d\x5b\x89\xd5\x4f\xbb\x97"
+			"\x8a\x15\xb5\xc4\x43\xc9\xec\x21\x03\x6d\x24\x60"
+			"\xb6\xf7\x3e\xba\xd0\xdc\x2a\xba\x6e\x62\x4a\xbf"
+			"\x07\x74\x5b\xc1\x07\x69\x4b\xb7\x54\x7b\xb0\x99"
+			"\x5f\x70\xde\x25\xd6\xb2\x9e\x2d\x30\x11\xbb\x19"
+			"\xd2\x76\x76\xc0\x71\x62\xc8\xb5\xcc\xde\x06\x68"
+			"\x96\x1d\xf8\x68\x03\x48\x2c\xb3\x7e\xd6\xd5\xc0"
+			"\xbb\x8d\x50\xcf\x1f\x50\xd4\x76\xaa\x04\x58\xbd"
+			"\xab\xa8\x06\xf4\x8b\xe9\xdc\xb8",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\xf9\x7a\x3c\xfd\x91\xfa\xa0\x46\xb9\xe6\x1b\x94"
+			"\x93\xd4\x36\xc4\x93\x1f\x60\x4b\x22\xf1\x08\x15"
+			"\x21\xb3\x41\x91\x51\xe8\xff\x06\x11\xf3\xa7\xd4"
+			"\x35\x95\x35\x7d\x58\x12\x0b\xd1\xe2\xdd\x8a\xed",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\xc6\x87\x1c\xff\x08\x24\xfe\x55\xea\x76\x89\xa5"
+			"\x22\x29\x88\x67\x30\x45\x0e\x5d\x36\x2d\xa5\xbf"
+			"\x59\x0d\xcf\x9a\xcd\x67\xfe\xd4\xcb\x32\x10\x7d"
+			"\xf5\xd0\x39\x69\xa6\x6b\x1f\x64\x94\xfd\xf5\xd6"
+			"\x3d\x5b\x4d\x0d\x34\xea\x73\x99\xa0\x7d\x01\x16"
+			"\x12\x6d\x0d\x51\x8c\x7c\x55\xba\x46\xe1\x2f\x62"
+			"\xef\xc8\xfe\x28\xa5\x1c\x9d\x42\x8e\x6d\x37\x1d"
+			"\x73\x97\xab\x31\x9f\xc7\x3d\xed\x47\x22\xe5\xb4"
+			"\xf3\x00\x04\x03\x2a\x61\x28\xdf\x5e\x74\x97\xec"
+			"\xf8\x2c\xa7\xb0\xa5\x0e\x86\x7e\xf6\x72\x8a\x4f"
+			"\x50\x9a\x8c\x85\x90\x87\x03\x9c",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\x51\x72\x89\xaf\xe4\x44\xa0\xfe\x5e\xd1\xa4\x1d"
+			"\xbb\xb5\xeb\x17\x15\x00\x79\xbd\xd3\x1e\x29\xcf"
+			"\x2f\xf3\x00\x34\xd8\x26\x8e\x3b",
+		.addtlb = (unsigned char *)
+			"\x88\x02\x8d\x29\xef\x80\xb4\xe6\xf0\xfe\x12\xf9"
+			"\x1d\x74\x49\xfe\x75\x06\x26\x82\xe8\x9c\x57\x14"
+			"\x40\xc0\xc9\xb5\x2c\x42\xa6\xe0",
+		.addtllen = 32,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x8d\xf0\x13\xb4\xd1\x03\x52\x30\x73\x91\x7d\xdf"
+			"\x6a\x86\x97\x93\x05\x9e\x99\x43\xfc\x86\x54\x54"
+			"\x9e\x7a\xb2\x2f\x7c\x29\xf1\x22\xda\x26\x25\xaf"
+			"\x2d\xdd\x4a\xbc\xce\x3c\xf4\xfa\x46\x59\xd8\x4e",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\xb9\x1c\xba\x4c\xc8\x4f\xa2\x5d\xf8\x61\x0b\x81"
+			"\xb6\x41\x40\x27\x68\xa2\x09\x72\x34\x93\x2e\x37"
+			"\xd5\x90\xb1\x15\x4c\xbd\x23\xf9\x74\x52\xe3\x10"
+			"\xe2\x91\xc4\x51\x46\x14\x7f\x0d\xa2\xd8\x17\x61"
+			"\xfe\x90\xfb\xa6\x4f\x94\x41\x9c\x0f\x66\x2b\x28"
+			"\xc1\xed\x94\xda\x48\x7b\xb7\xe7\x3e\xec\x79\x8f"
+			"\xbc\xf9\x81\xb7\x91\xd1\xbe\x4f\x17\x7a\x89\x07"
+			"\xaa\x3c\x40\x16\x43\xa5\xb6\x2b\x87\xb8\x9d\x66"
+			"\xb3\xa6\x0e\x40\xd4\xa8\xe4\xe9\xd8\x2a\xf6\xd2"
+			"\x70\x0e\x6f\x53\x5c\xdb\x51\xf7\x5c\x32\x17\x29"
+			"\x10\x37\x41\x03\x0c\xcc\x3a\x56",
+		.expectedlen = 128,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = (unsigned char *)
+			"\xb5\x71\xe6\x6d\x7c\x33\x8b\xc0\x7b\x76\xad\x37"
+			"\x57\xbb\x2f\x94\x52\xbf\x7e\x07\x43\x7a\xe8\x58"
+			"\x1c\xe7\xbc\x7c\x3a\xc6\x51\xa9",
+		.perslen = 32,
+	}, {
+		.entropy = (unsigned char *)
+			"\xc2\xa5\x66\xa9\xa1\x81\x7b\x15\xc5\xc3\xb7\x78"
+			"\x17\x7a\xc8\x7c\x24\xe7\x97\xbe\x0a\x84\x5f\x11"
+			"\xc2\xfe\x39\x9d\xd3\x77\x32\xf2\xcb\x18\x94\xeb"
+			"\x2b\x97\xb3\xc5\x6e\x62\x83\x29\x51\x6f\x86\xec",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\xb3\xa3\x69\x8d\x77\x76\x99\xa0\xdd\x9f\xa3\xf0"
+			"\xa9\xfa\x57\x83\x2d\x3c\xef\xac\x5d\xf2\x44\x37"
+			"\xc6\xd7\x3a\x0f\xe4\x10\x40\xf1\x72\x90\x38\xae"
+			"\xf1\xe9\x26\x35\x2e\xa5\x9d\xe1\x20\xbf\xb7\xb0"
+			"\x73\x18\x3a\x34\x10\x6e\xfe\xd6\x27\x8f\xf8\xad"
+			"\x84\x4b\xa0\x44\x81\x15\xdf\xdd\xf3\x31\x9a\x82"
+			"\xde\x6b\xb1\x1d\x80\xbd\x87\x1a\x9a\xcd\x35\xc7"
+			"\x36\x45\xe1\x27\x0f\xb9\xfe\x4f\xa8\x8e\xc0\xe4"
+			"\x65\x40\x9e\xa0\xcb\xa8\x09\xfe\x2f\x45\xe0\x49"
+			"\x43\xa2\xe3\x96\xbb\xb7\xdd\x2f\x4e\x07\x95\x30"
+			"\x35\x24\xcc\x9c\xc5\xea\x54\xa1",
+		.expectedlen = 128,
+		.addtla = (unsigned char *)
+			"\x41\x3d\xd8\x3f\xe5\x68\x35\xab\xd4\x78\xcb\x96"
+			"\x93\xd6\x76\x35\x90\x1c\x40\x23\x9a\x26\x64\x62"
+			"\xd3\x13\x3b\x83\xe4\x9c\x82\x0b",
+		.addtlb = (unsigned char *)
+			"\xd5\xc4\xa7\x1f\x9d\x6d\x95\xa1\xbe\xdf\x0b\xd2"
+			"\x24\x7c\x27\x7d\x1f\x84\xa4\xe5\x7a\x4a\x88\x25"
+			"\xb8\x2a\x2d\x09\x7d\xe6\x3e\xf1",
+		.addtllen = 32,
+		.pers = (unsigned char *)
+			"\x13\xce\x4d\x8d\xd2\xdb\x97\x96\xf9\x41\x56\xc8"
+			"\xe8\xf0\x76\x9b\x0a\xa1\xc8\x2c\x13\x23\xb6\x15"
+			"\x36\x60\x3b\xca\x37\xc9\xee\x29",
+		.perslen = 32,
+	},
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
+			"\x6c\x95\xb8\xf1\xc9\xa8\xf9\xcb\x24\x5a\x8b\x40"
+			"\xf3\xa6\xe5\xa7\xfb\xd9\xd3\xc6\x8e\x27\x7b\xa9"
+			"\xac\x9b\xbb\x00",
+		.entropylen = 40,
+		.expected = (unsigned char *)
+			"\x8c\x2e\x72\xab\xfd\x9b\xb8\x28\x4d\xb7\x9e\x17"
+			"\xa4\x3a\x31\x46\xcd\x76\x94\xe3\x52\x49\xfc\x33"
+			"\x83\x91\x4a\x71\x17\xf4\x13\x68\xe6\xd4\xf1\x48"
+			"\xff\x49\xbf\x29\x07\x6b\x50\x15\xc5\x9f\x45\x79"
+			"\x45\x66\x2e\x3d\x35\x03\x84\x3f\x4a\xa5\xa3\xdf"
+			"\x9a\x9d\xf1\x0d",
+		.expectedlen = 64,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	},
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
+			"\x21\x1d\x78\xa0\xb9\x38\x9a\x74\xe5\xbc\xcf\xec"
+			"\xe8\xd7\x66\xaf\x1a\x6d\x3b\x14\x49\x6f\x25\xb0"
+			"\xf1\x30\x1b\x4f\x50\x1b\xe3\x03\x80\xa1\x37\xeb",
+		.entropylen = 48,
+		.expected = (unsigned char *)
+			"\x58\x62\xeb\x38\xbd\x55\x8d\xd9\x78\xa6\x96\xe6"
+			"\xdf\x16\x47\x82\xdd\xd8\x87\xe7\xe9\xa6\xc9\xf3"
+			"\xf1\xfb\xaf\xb7\x89\x41\xb5\x35\xa6\x49\x12\xdf"
+			"\xd2\x24\xc6\xdc\x74\x54\xe5\x25\x0b\x3d\x97\x16"
+			"\x5e\x16\x26\x0c\x2f\xaf\x1c\xc7\x73\x5c\xb7\x5f"
+			"\xb4\xf0\x7e\x1d",
+		.expectedlen = 64,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	},
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
+	{
+		.entropy = (unsigned char *)
+			"\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
+			"\x64\xbf\xf2\x64\xa3\x9e\x98\xdb\x6c\x10\x78\x7f",
+		.entropylen = 24,
+		.expected = (unsigned char *)
+			"\x2c\x14\x7e\x24\x11\x9a\xd8\xd4\xb2\xed\x61\xc1"
+			"\x53\xd0\x50\xc9\x24\xff\x59\x75\x15\xf1\x17\x3a"
+			"\x3d\xf4\x4b\x2c\x84\x28\xef\x89\x0e\xb9\xde\xf3"
+			"\xe4\x78\x04\xb2\xfd\x9b\x35\x7f\xe1\x3f\x8a\x3e"
+			"\x10\xc8\x67\x0a\xf9\xdf\x2d\x6c\x96\xfb\xb2\xb8"
+			"\xcb\x2d\xd6\xb0",
+		.expectedlen = 64,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\x71\xbd\xce\x35\x42\x7d\x20\xbf\x58\xcf\x17\x74"
+			"\xce\x72\xd8\x33\x34\x50\x2d\x8f\x5b\x14\xc4\xdd",
+		.entropylen = 24,
+		.expected = (unsigned char *)
+			"\x97\x33\xe8\x20\x12\xe2\x7b\xa1\x46\x8f\xf2\x34"
+			"\xb3\xc9\xb6\x6b\x20\xb2\x4f\xee\x27\xd8\x0b\x21"
+			"\x8c\xff\x63\x73\x69\x29\xfb\xf3\x85\xcd\x88\x8e"
+			"\x43\x2c\x71\x8b\xa2\x55\xd2\x0f\x1d\x7f\xe3\xe1"
+			"\x2a\xa3\xe9\x2c\x25\x89\xc7\x14\x52\x99\x56\xcc"
+			"\xc3\xdf\xb3\x81",
+		.expectedlen = 64,
+		.addtla = (unsigned char *)
+			"\x66\xef\x42\xd6\x9a\x8c\x3d\x6d\x4a\x9e\x95\xa6"
+			"\x91\x4d\x81\x56",
+		.addtlb = (unsigned char *)
+			"\xe3\x18\x83\xd9\x4b\x5e\xc4\xcc\xaa\x61\x2f\xbb"
+			"\x4a\x55\xd1\xc6",
+		.addtllen = 16,
+		.pers = NULL,
+		.perslen = 0,
+	}, {
+		.entropy = (unsigned char *)
+			"\xca\x4b\x1e\xfa\x75\xbd\x69\x36\x38\x73\xb8\xf9"
+			"\xdb\x4d\x35\x0e\x47\xbf\x6c\x37\x72\xfd\xf7\xa9",
+		.entropylen = 24,
+		.expected = (unsigned char *)
+			"\x59\xc3\x19\x79\x1b\xb1\xf3\x0e\xe9\x34\xae\x6e"
+			"\x8b\x1f\xad\x1f\x74\xca\x25\x45\x68\xb8\x7f\x75"
+			"\x12\xf8\xf2\xab\x4c\x23\x01\x03\x05\xe1\x70\xee"
+			"\x75\xd8\xcb\xeb\x23\x4c\x7a\x23\x6e\x12\x27\xdb"
+			"\x6f\x7a\xac\x3c\x44\xb7\x87\x4b\x65\x56\x74\x45"
+			"\x34\x30\x0c\x3d",
+		.expectedlen = 64,
+		.addtla = NULL,
+		.addtlb = NULL,
+		.addtllen = 0,
+		.pers = (unsigned char *)
+			"\xeb\xaa\x60\x2c\x4d\xbe\x33\xff\x1b\xef\xbf\x0a"
+			"\x0b\xc6\x97\x54",
+		.perslen = 16,
+	}, {
+		.entropy = (unsigned char *)
+			"\xc0\x70\x1f\x92\x50\x75\x8f\xcd\xf2\xbe\x73\x98"
+			"\x80\xdb\x66\xeb\x14\x68\xb4\xa5\x87\x9c\x2d\xa6",
+		.entropylen = 24,
+		.expected = (unsigned char *)
+			"\x97\xc0\xc0\xe5\xa0\xcc\xf2\x4f\x33\x63\x48\x8a"
+			"\xdb\x13\x0a\x35\x89\xbf\x80\x65\x62\xee\x13\x95"
+			"\x7c\x33\xd3\x7d\xf4\x07\x77\x7a\x2b\x65\x0b\x5f"
+			"\x45\x5c\x13\xf1\x90\x77\x7f\xc5\x04\x3f\xcc\x1a"
+			"\x38\xf8\xcd\x1b\xbb\xd5\x57\xd1\x4a\x4c\x2e\x8a"
+			"\x2b\x49\x1e\x5c",
+		.expectedlen = 64,
+		.addtla = (unsigned char *)
+			"\xf9\x01\xf8\x16\x7a\x1d\xff\xde\x8e\x3c\x83\xe2"
+			"\x44\x85\xe7\xfe",
+		.addtlb = (unsigned char *)
+			"\x17\x1c\x09\x38\xc2\x38\x9f\x97\x87\x60\x55\xb4"
+			"\x82\x16\x62\x7f",
+		.addtllen = 16,
+		.pers = (unsigned char *)
+			"\x80\x08\xae\xe8\xe9\x69\x40\xc5\x08\x73\xc7\x9f"
+			"\x8e\xcf\xe0\x02",
+		.perslen = 16,
+	},
+};
+
 /* Cast5 test vectors from RFC 2144 */
 #define CAST5_ENC_TEST_VECTORS		4
 #define CAST5_DEC_TEST_VECTORS		4
@@ -20907,8 +21791,8 @@
 			  "\xF5\xBC\x25\xD6\x02\x56\x57\x1C",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
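
In testmgr, .np is the number of scatterlist segments and .tap[] their sizes; the entries must add up to .rlen (496 here, 1008 and 512 in the later hunks, all with the same 4-and-16-byte tail). The new 4-byte middle segment is smaller than the cipher block size, presumably to exercise blocks that straddle segment boundaries. A trivial standalone check of the arithmetic, under that reading of the fields:

#include <assert.h>

int main(void)
{
	/* The new three-way split used above */
	unsigned int tap[] = { 496 - 20, 4, 16 };
	unsigned int i, sum = 0;

	for (i = 0; i < sizeof(tap) / sizeof(tap[0]); i++)
		sum += tap[i];

	assert(sum == 496);	/* must equal .rlen */
	return 0;
}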
 
@@ -21068,8 +21952,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -21206,8 +22090,8 @@
 			  "\x1D\x18\x66\x44\x5B\x8F\x14\xEB",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -21344,8 +22228,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -21495,8 +22379,8 @@
 			  "\xC0\x0D\x96\xAA\x23\xF8\xFE\x13",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -21646,8 +22530,8 @@
 			  "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
 		.rlen	= 496,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 496 - 16, 16 },
+		.np	= 3,
+		.tap	= { 496 - 20, 4, 16 },
 	},
 };
 
@@ -22805,8 +23689,8 @@
 			  "\x33\x1A\xBB\xD3\xA2\x7E\x97\x66",
 		.rlen	= 1008,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 1008 - 16, 16 },
+		.np	= 3,
+		.tap	= { 1008 - 20, 4, 16 },
 	},
 };
 
@@ -23105,8 +23989,8 @@
 			  "\x72\x09\xA0\x14\xAB\x42\xD9\x4D",
 		.rlen	= 1008,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 1008 - 16, 16 },
+		.np	= 3,
+		.tap	= { 1008 - 20, 4, 16 },
 	},
 };
 
@@ -23401,8 +24285,8 @@
 			  "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C",
 		.rlen	= 1008,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 1008 - 16, 16 },
+		.np	= 3,
+		.tap	= { 1008 - 20, 4, 16 },
 	},
 };
 
@@ -23697,8 +24581,8 @@
 			  "\x72\x09\xA0\x14\xAB\x42\xD9\x4D",
 		.rlen	= 1008,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 1008 - 16, 16 },
+		.np	= 3,
+		.tap	= { 1008 - 20, 4, 16 },
 	},
 };
 
@@ -25283,8 +26167,8 @@
 			  "\x5a\xa8\x92\x7f\xba\xe6\x0c\x95",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -25536,8 +26420,8 @@
 			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -25878,8 +26762,8 @@
 			  "\xd5\xc6\x99\xcc\x4e\x6c\x94\x95",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
@@ -26221,8 +27105,8 @@
 			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
 		.rlen	= 512,
 		.also_non_np = 1,
-		.np	= 2,
-		.tap	= { 512 - 16, 16 },
+		.np	= 3,
+		.tap	= { 512 - 20, 4, 16 },
 	},
 };
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 7671dba..e65d400 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -141,6 +141,15 @@
 
 	  If unsure, say N.
 
+config AHCI_TEGRA
+	tristate "NVIDIA Tegra124 AHCI SATA support"
+	depends on ARCH_TEGRA
+	help
+	  This option enables support for the NVIDIA Tegra124 SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
 config AHCI_XGENE
 	tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
 	depends on PHY_XGENE
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 5a02aee..ae41107 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_AHCI_MVEBU)	+= ahci_mvebu.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_SUNXI)	+= ahci_sunxi.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_ST)		+= ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_TEGRA)	+= ahci_tegra.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_XGENE)	+= ahci_xgene.o libahci.o libahci_platform.o
 
 # SFF w/ custom DMA
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 0cd7c7a..25d0ac3 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -441,7 +441,7 @@
 	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
 
 	/* save initial config */
-	ahci_save_initial_config(&pdev->dev, hpriv, 0, 0);
+	ahci_save_initial_config(&pdev->dev, hpriv);
 
 	/* prepare host */
 	if (hpriv->cap & HOST_CAP_NCQ)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 4cd52a4..a29f801 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -526,8 +526,7 @@
 			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
 	}
 
-	ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
-				 mask_port_map);
+	ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
 static int ahci_pci_reset_controller(struct ata_host *host)
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 5513296..59ae0ee 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -53,7 +53,7 @@
 
 enum {
 	AHCI_MAX_PORTS		= 32,
-	AHCI_MAX_CLKS		= 3,
+	AHCI_MAX_CLKS		= 4,
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
 	AHCI_MAX_CMDS		= 32,
@@ -316,8 +316,12 @@
 };
 
 struct ahci_host_priv {
-	void __iomem *		mmio;		/* bus-independent mem map */
+	/* Input fields */
 	unsigned int		flags;		/* AHCI_HFLAG_* */
+	u32			force_port_map;	/* force port map */
+	u32			mask_port_map;	/* mask out particular bits */
+
+	void __iomem *		mmio;		/* bus-independent mem map */
 	u32			cap;		/* cap to use */
 	u32			cap2;		/* cap2 to use */
 	u32			port_map;	/* port map to use */
@@ -330,7 +334,12 @@
 	bool			got_runtime_pm; /* Did we do pm_runtime_get? */
 	struct clk		*clks[AHCI_MAX_CLKS]; /* Optional */
 	struct regulator	*target_pwr;	/* Optional */
-	struct phy		*phy;		/* If platform uses phy */
+	/*
+	 * PHYs used by the platform, if any. There is a 1:1 relation
+	 * between the port number and the PHY position in this array.
+	 */
+	struct phy		**phys;
+	unsigned		nports;		/* Number of ports */
 	void			*plat_data;	/* Other platform data */
 	/*
 	 * Optional ahci_start_engine override, if not set this gets set to the
@@ -361,9 +370,7 @@
 void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 			u32 opts);
 void ahci_save_initial_config(struct device *dev,
-			      struct ahci_host_priv *hpriv,
-			      unsigned int force_port_map,
-			      unsigned int mask_port_map);
+			      struct ahci_host_priv *hpriv);
 void ahci_init_controller(struct ata_host *host);
 int ahci_reset_controller(struct ata_host *host);
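
With force_port_map and mask_port_map (and the host flags) now living in ahci_host_priv, per-host tweaks are filled in before calling the slimmed-down ahci_platform_init_host(), as the driver conversions below show. A sketch of the resulting probe shape for a hypothetical platform driver; the foo_* names and the chosen flag and mask values are illustrative only:

#include <linux/ahci_platform.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include "ahci.h"

static const struct ata_port_info foo_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static int foo_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	/* Formerly the hflags/force_port_map/mask_port_map arguments */
	hpriv->flags = AHCI_HFLAG_NO_PMP;
	hpriv->mask_port_map = 0x1;	/* expose only port 0 */

	rc = ahci_platform_init_host(pdev, hpriv, &foo_port_info);
	if (rc)
		ahci_platform_disable_resources(hpriv);
	return rc;
}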
 
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 2b77d53..ad1e71e 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -85,8 +85,7 @@
 
 	da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
 
-	rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
-				     0, 0, 0);
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info);
 	if (rc)
 		goto disable_resources;
 
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index cac4360..f3970b4 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -64,6 +64,7 @@
 	struct regmap *gpr;
 	bool no_device;
 	bool first_time;
+	u32 phy_params;
 };
 
 static int ahci_imx_hotplug;
@@ -248,14 +249,7 @@
 				   IMX6Q_GPR13_SATA_TX_LVL_MASK |
 				   IMX6Q_GPR13_SATA_MPLL_CLK_EN |
 				   IMX6Q_GPR13_SATA_TX_EDGE_RATE,
-				   IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
-				   IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
-				   IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
-				   IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
-				   IMX6Q_GPR13_SATA_MPLL_SS_EN |
-				   IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
-				   IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
-				   IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
+				   imxpriv->phy_params);
 		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
 				   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
 				   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
@@ -265,7 +259,7 @@
 		ret = imx_sata_phy_reset(hpriv);
 		if (ret) {
 			dev_err(dev, "failed to reset phy: %d\n", ret);
-			goto disable_regulator;
+			goto disable_clk;
 		}
 	}
 
@@ -273,6 +267,8 @@
 
 	return 0;
 
+disable_clk:
+	clk_disable_unprepare(imxpriv->sata_ref_clk);
 disable_regulator:
 	if (hpriv->target_pwr)
 		regulator_disable(hpriv->target_pwr);
@@ -369,6 +365,165 @@
 };
 MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
 
+struct reg_value {
+	u32 of_value;
+	u32 reg_value;
+};
+
+struct reg_property {
+	const char *name;
+	const struct reg_value *values;
+	size_t num_values;
+	u32 def_value;
+	u32 set_value;
+};
+
+static const struct reg_value gpr13_tx_level[] = {
+	{  937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
+	{  947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
+	{  957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
+	{  966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
+	{  976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
+	{  986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
+	{  996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
+	{ 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
+	{ 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
+	{ 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
+	{ 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
+	{ 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
+	{ 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
+	{ 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
+	{ 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
+	{ 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
+	{ 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
+	{ 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
+	{ 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
+	{ 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
+	{ 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
+	{ 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
+	{ 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
+	{ 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
+	{ 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
+	{ 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
+	{ 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
+	{ 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
+	{ 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
+	{ 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
+	{ 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
+	{ 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
+};
+
+static const struct reg_value gpr13_tx_boost[] = {
+	{    0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
+	{  370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
+	{  740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
+	{ 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
+	{ 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
+	{ 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
+	{ 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
+	{ 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
+	{ 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
+	{ 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
+	{ 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
+	{ 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
+	{ 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
+	{ 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
+	{ 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
+	{ 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
+};
+
+static const struct reg_value gpr13_tx_atten[] = {
+	{  8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
+	{  9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
+	{ 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
+	{ 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
+	{ 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
+	{ 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
+};
+
+static const struct reg_value gpr13_rx_eq[] = {
+	{  500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
+	{ 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
+	{ 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
+	{ 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
+	{ 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
+	{ 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
+	{ 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
+	{ 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
+};
+
+static const struct reg_property gpr13_props[] = {
+	{
+		.name = "fsl,transmit-level-mV",
+		.values = gpr13_tx_level,
+		.num_values = ARRAY_SIZE(gpr13_tx_level),
+		.def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
+	}, {
+		.name = "fsl,transmit-boost-mdB",
+		.values = gpr13_tx_boost,
+		.num_values = ARRAY_SIZE(gpr13_tx_boost),
+		.def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
+	}, {
+		.name = "fsl,transmit-atten-16ths",
+		.values = gpr13_tx_atten,
+		.num_values = ARRAY_SIZE(gpr13_tx_atten),
+		.def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
+	}, {
+		.name = "fsl,receive-eq-mdB",
+		.values = gpr13_rx_eq,
+		.num_values = ARRAY_SIZE(gpr13_rx_eq),
+		.def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
+	}, {
+		.name = "fsl,no-spread-spectrum",
+		.def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
+		.set_value = 0,
+	},
+};
+
+static u32 imx_ahci_parse_props(struct device *dev,
+				const struct reg_property *prop, size_t num)
+{
+	struct device_node *np = dev->of_node;
+	u32 reg_value = 0;
+	int i, j;
+
+	for (i = 0; i < num; i++, prop++) {
+		u32 of_val;
+
+		if (prop->num_values == 0) {
+			if (of_property_read_bool(np, prop->name))
+				reg_value |= prop->set_value;
+			else
+				reg_value |= prop->def_value;
+			continue;
+		}
+
+		if (of_property_read_u32(np, prop->name, &of_val)) {
+			dev_info(dev, "%s not specified, using %08x\n",
+				prop->name, prop->def_value);
+			reg_value |= prop->def_value;
+			continue;
+		}
+
+		for (j = 0; j < prop->num_values; j++) {
+			if (prop->values[j].of_value == of_val) {
+				dev_info(dev, "%s value %u, using %08x\n",
+					prop->name, of_val, prop->values[j].reg_value);
+				reg_value |= prop->values[j].reg_value;
+				break;
+			}
+		}
+
+		if (j == prop->num_values) {
+			dev_err(dev, "DT property %s is not a valid value\n",
+				prop->name);
+			reg_value |= prop->def_value;
+		}
+	}
+
+	return reg_value;
+}
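
imx_ahci_parse_props() is a table-driven mapping from DT values to register fields, falling back to a per-property default when a property is absent or carries an unknown value. The same lookup rule in a stripped-down, self-contained form; the table contents below are placeholders, not the real IMX6Q_GPR13 encodings:

#include <stdio.h>

struct mock_reg_value {
	unsigned int of_value;	/* value as written in the DT */
	unsigned int reg_value;	/* corresponding register field */
};

static unsigned int lookup(const struct mock_reg_value *v, int n,
			   unsigned int of_val, unsigned int def)
{
	int i;

	for (i = 0; i < n; i++)
		if (v[i].of_value == of_val)
			return v[i].reg_value;
	return def;	/* unknown value: keep the default encoding */
}

int main(void)
{
	static const struct mock_reg_value tx_level[] = {
		{ 1015, 0x01 },	/* placeholder for ..._TX_LVL_1_015_V */
		{ 1025, 0x02 },	/* placeholder for ..._TX_LVL_1_025_V */
	};

	printf("%#x\n", lookup(tx_level, 2, 1025, 0x02)); /* exact match */
	printf("%#x\n", lookup(tx_level, 2, 9999, 0x02)); /* falls back  */
	return 0;
}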
+
 static int imx_ahci_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -410,6 +565,8 @@
 	}
 
 	if (imxpriv->type == AHCI_IMX6Q) {
+		u32 reg_value;
+
 		imxpriv->gpr = syscon_regmap_lookup_by_compatible(
 							"fsl,imx6q-iomuxc-gpr");
 		if (IS_ERR(imxpriv->gpr)) {
@@ -417,6 +574,15 @@
 				"failed to find fsl,imx6q-iomux-gpr regmap\n");
 			return PTR_ERR(imxpriv->gpr);
 		}
+
+		reg_value = imx_ahci_parse_props(dev, gpr13_props,
+						 ARRAY_SIZE(gpr13_props));
+
+		imxpriv->phy_params =
+				   IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+				   IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+				   IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+				   reg_value;
 	}
 
 	hpriv = ahci_platform_get_resources(pdev);
@@ -454,8 +620,7 @@
 	reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
 	writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
 
-	ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
-				      0, 0, 0);
+	ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info);
 	if (ret)
 		goto disable_sata;
 
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index fd3dfd7..68672d2 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -88,8 +88,7 @@
 	ahci_mvebu_mbus_config(hpriv, dram);
 	ahci_mvebu_regret_option(hpriv);
 
-	rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
-				     0, 0, 0);
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info);
 	if (rc)
 		goto disable_resources;
 
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index b10d81d..f61ddb9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -34,7 +34,6 @@
 	struct device *dev = &pdev->dev;
 	struct ahci_platform_data *pdata = dev_get_platdata(dev);
 	struct ahci_host_priv *hpriv;
-	unsigned long hflags = 0;
 	int rc;
 
 	hpriv = ahci_platform_get_resources(pdev);
@@ -58,10 +57,9 @@
 	}
 
 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
-		hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
-	rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
-				     hflags, 0, 0);
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info);
 	if (rc)
 		goto pdata_exit;
 
@@ -78,6 +76,8 @@
 			 ahci_platform_resume);
 
 static const struct of_device_id ahci_of_match[] = {
+	{ .compatible = "generic-ahci", },
+	/* Keep the following compatibles for device tree compatibility */
 	{ .compatible = "snps,spear-ahci", },
 	{ .compatible = "snps,exynos5440-ahci", },
 	{ .compatible = "ibm,476gtr-ahci", },
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 2595598..835d6ee 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -166,7 +166,7 @@
 	if (err)
 		return err;
 
-	err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0, 0);
+	err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info);
 	if (err) {
 		ahci_platform_disable_resources(hpriv);
 		return err;
@@ -221,7 +221,7 @@
 
 static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
 
-static struct of_device_id st_ahci_match[] = {
+static const struct of_device_id st_ahci_match[] = {
 	{ .compatible = "st,ahci", },
 	{},
 };
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 02002f1..e44d675 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -167,7 +167,6 @@
 {
 	struct device *dev = &pdev->dev;
 	struct ahci_host_priv *hpriv;
-	unsigned long hflags;
 	int rc;
 
 	hpriv = ahci_platform_get_resources(pdev);
@@ -184,11 +183,10 @@
 	if (rc)
 		goto disable_resources;
 
-	hflags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
-		 AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+	hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+		       AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
 
-	rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
-				     hflags, 0, 0);
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info);
 	if (rc)
 		goto disable_resources;
 
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
new file mode 100644
index 0000000..fc3df47
--- /dev/null
+++ b/drivers/ata/ahci_tegra.c
@@ -0,0 +1,376 @@
+/*
+ * drivers/ata/ahci_tegra.c
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author:
+ *	Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/reset.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define SATA_CONFIGURATION_0				0x180
+#define SATA_CONFIGURATION_EN_FPCI			BIT(0)
+
+#define SCFG_OFFSET					0x1000
+
+#define T_SATA0_CFG_1					0x04
+#define T_SATA0_CFG_1_IO_SPACE				BIT(0)
+#define T_SATA0_CFG_1_MEMORY_SPACE			BIT(1)
+#define T_SATA0_CFG_1_BUS_MASTER			BIT(2)
+#define T_SATA0_CFG_1_SERR				BIT(8)
+
+#define T_SATA0_CFG_9					0x24
+#define T_SATA0_CFG_9_BASE_ADDRESS_SHIFT		13
+
+#define SATA_FPCI_BAR5					0x94
+#define SATA_FPCI_BAR5_START_SHIFT			4
+
+#define SATA_INTR_MASK					0x188
+#define SATA_INTR_MASK_IP_INT_MASK			BIT(16)
+
+#define T_SATA0_AHCI_HBA_CAP_BKDR			0x300
+
+#define T_SATA0_BKDOOR_CC				0x4a4
+
+#define T_SATA0_CFG_SATA				0x54c
+#define T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN		BIT(12)
+
+#define T_SATA0_CFG_MISC				0x550
+
+#define T_SATA0_INDEX					0x680
+
+#define T_SATA0_CHX_PHY_CTRL1_GEN1			0x690
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK		0xff
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT		0
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK		(0xff << 8)
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT	8
+
+#define T_SATA0_CHX_PHY_CTRL1_GEN2			0x694
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK		0xff
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT		0
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK		(0xff << 12)
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT	12
+
+#define T_SATA0_CHX_PHY_CTRL2				0x69c
+#define T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1		0x23
+
+#define T_SATA0_CHX_PHY_CTRL11				0x6d0
+#define T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ		(0x2800 << 16)
+
+#define FUSE_SATA_CALIB					0x124
+#define FUSE_SATA_CALIB_MASK				0x3
+
+struct sata_pad_calibration {
+	u8 gen1_tx_amp;
+	u8 gen1_tx_peak;
+	u8 gen2_tx_amp;
+	u8 gen2_tx_peak;
+};
+
+static const struct sata_pad_calibration tegra124_pad_calibration[] = {
+	{0x18, 0x04, 0x18, 0x0a},
+	{0x0e, 0x04, 0x14, 0x0a},
+	{0x0e, 0x07, 0x1a, 0x0e},
+	{0x14, 0x0e, 0x1a, 0x0e},
+};
+
+struct tegra_ahci_priv {
+	struct platform_device	   *pdev;
+	void __iomem		   *sata_regs;
+	struct reset_control	   *sata_rst;
+	struct reset_control	   *sata_oob_rst;
+	struct reset_control	   *sata_cold_rst;
+	/* Needs special handling, cannot use ahci_platform */
+	struct clk		   *sata_clk;
+	struct regulator_bulk_data supplies[5];
+};
+
+static int tegra_ahci_power_on(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(tegra->supplies),
+				    tegra->supplies);
+	if (ret)
+		return ret;
+
+	ret = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SATA,
+						tegra->sata_clk,
+						tegra->sata_rst);
+	if (ret)
+		goto disable_regulators;
+
+	reset_control_assert(tegra->sata_oob_rst);
+	reset_control_assert(tegra->sata_cold_rst);
+
+	ret = ahci_platform_enable_resources(hpriv);
+	if (ret)
+		goto disable_power;
+
+	reset_control_deassert(tegra->sata_cold_rst);
+	reset_control_deassert(tegra->sata_oob_rst);
+
+	return 0;
+
+disable_power:
+	clk_disable_unprepare(tegra->sata_clk);
+
+	tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
+
+disable_regulators:
+	regulator_bulk_disable(ARRAY_SIZE(tegra->supplies), tegra->supplies);
+
+	return ret;
+}
+
+static void tegra_ahci_power_off(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+
+	ahci_platform_disable_resources(hpriv);
+
+	reset_control_assert(tegra->sata_rst);
+	reset_control_assert(tegra->sata_oob_rst);
+	reset_control_assert(tegra->sata_cold_rst);
+
+	clk_disable_unprepare(tegra->sata_clk);
+	tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
+
+	regulator_bulk_disable(ARRAY_SIZE(tegra->supplies), tegra->supplies);
+}
+
+static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+	int ret;
+	unsigned int val;
+	struct sata_pad_calibration calib;
+
+	ret = tegra_ahci_power_on(hpriv);
+	if (ret) {
+		dev_err(&tegra->pdev->dev,
+			"failed to power on AHCI controller: %d\n", ret);
+		return ret;
+	}
+
+	val = readl(tegra->sata_regs + SATA_CONFIGURATION_0);
+	val |= SATA_CONFIGURATION_EN_FPCI;
+	writel(val, tegra->sata_regs + SATA_CONFIGURATION_0);
+
+	/* Pad calibration */
+
+	/*
+	 * FIXME: Always use calibration 0. Change this to read the
+	 * calibration fuse once the fuse driver has landed.
+	 */
+	val = 0;
+
+	calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
+
+	writel(BIT(0), tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
+
+	val = readl(tegra->sata_regs +
+		SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1);
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK;
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK;
+	val |= calib.gen1_tx_amp <<
+			T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
+	val |= calib.gen1_tx_peak <<
+			T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
+	writel(val, tegra->sata_regs + SCFG_OFFSET +
+		T_SATA0_CHX_PHY_CTRL1_GEN1);
+
+	val = readl(tegra->sata_regs +
+			SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2);
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK;
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK;
+	val |= calib.gen2_tx_amp <<
+			T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT;
+	val |= calib.gen2_tx_peak <<
+			T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT;
+	writel(val, tegra->sata_regs + SCFG_OFFSET +
+		T_SATA0_CHX_PHY_CTRL1_GEN2);
+
+	writel(T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ,
+		tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL11);
+	writel(T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1,
+		tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL2);
+
+	writel(0, tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
+
+	/* Program controller device ID */
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+	val |= T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+
+	writel(0x01060100, tegra->sata_regs + SCFG_OFFSET + T_SATA0_BKDOOR_CC);
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+	val &= ~T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+
+	/* Enable IO & memory access, bus master mode */
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
+	val |= T_SATA0_CFG_1_IO_SPACE | T_SATA0_CFG_1_MEMORY_SPACE |
+		T_SATA0_CFG_1_BUS_MASTER | T_SATA0_CFG_1_SERR;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
+
+	/* Program SATA MMIO */
+
+	writel(0x10000 << SATA_FPCI_BAR5_START_SHIFT,
+	       tegra->sata_regs + SATA_FPCI_BAR5);
+
+	writel(0x08000 << T_SATA0_CFG_9_BASE_ADDRESS_SHIFT,
+	       tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_9);
+
+	/* Unmask SATA interrupts */
+
+	val = readl(tegra->sata_regs + SATA_INTR_MASK);
+	val |= SATA_INTR_MASK_IP_INT_MASK;
+	writel(val, tegra->sata_regs + SATA_INTR_MASK);
+
+	return 0;
+}
+
+static void tegra_ahci_controller_deinit(struct ahci_host_priv *hpriv)
+{
+	tegra_ahci_power_off(hpriv);
+}
+
+static void tegra_ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	tegra_ahci_controller_deinit(hpriv);
+}
+
+static struct ata_port_operations ahci_tegra_port_ops = {
+	.inherits	= &ahci_ops,
+	.host_stop	= tegra_ahci_host_stop,
+};
+
+static const struct ata_port_info ahci_tegra_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_tegra_port_ops,
+};
+
+static const struct of_device_id tegra_ahci_of_match[] = {
+	{ .compatible = "nvidia,tegra124-ahci" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
+
+static int tegra_ahci_probe(struct platform_device *pdev)
+{
+	struct ahci_host_priv *hpriv;
+	struct tegra_ahci_priv *tegra;
+	struct resource *res;
+	int ret;
+
+	hpriv = ahci_platform_get_resources(pdev);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	hpriv->plat_data = tegra;
+
+	tegra->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	tegra->sata_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->sata_regs))
+		return PTR_ERR(tegra->sata_regs);
+
+	tegra->sata_rst = devm_reset_control_get(&pdev->dev, "sata");
+	if (IS_ERR(tegra->sata_rst)) {
+		dev_err(&pdev->dev, "Failed to get sata reset\n");
+		return PTR_ERR(tegra->sata_rst);
+	}
+
+	tegra->sata_oob_rst = devm_reset_control_get(&pdev->dev, "sata-oob");
+	if (IS_ERR(tegra->sata_oob_rst)) {
+		dev_err(&pdev->dev, "Failed to get sata-oob reset\n");
+		return PTR_ERR(tegra->sata_oob_rst);
+	}
+
+	tegra->sata_cold_rst = devm_reset_control_get(&pdev->dev, "sata-cold");
+	if (IS_ERR(tegra->sata_cold_rst)) {
+		dev_err(&pdev->dev, "Failed to get sata-cold reset\n");
+		return PTR_ERR(tegra->sata_cold_rst);
+	}
+
+	tegra->sata_clk = devm_clk_get(&pdev->dev, "sata");
+	if (IS_ERR(tegra->sata_clk)) {
+		dev_err(&pdev->dev, "Failed to get sata clock\n");
+		return PTR_ERR(tegra->sata_clk);
+	}
+
+	tegra->supplies[0].supply = "avdd";
+	tegra->supplies[1].supply = "hvdd";
+	tegra->supplies[2].supply = "vddio";
+	tegra->supplies[3].supply = "target-5v";
+	tegra->supplies[4].supply = "target-12v";
+
+	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(tegra->supplies),
+				      tegra->supplies);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to get regulators\n");
+		return ret;
+	}
+
+	ret = tegra_ahci_controller_init(hpriv);
+	if (ret)
+		return ret;
+
+	ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info);
+	if (ret)
+		goto deinit_controller;
+
+	return 0;
+
+deinit_controller:
+	tegra_ahci_controller_deinit(hpriv);
+
+	return ret;
+}
+
+static struct platform_driver tegra_ahci_driver = {
+	.probe = tegra_ahci_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = "tegra-ahci",
+		.of_match_table = tegra_ahci_of_match,
+	},
+	/* LP0 suspend support not implemented */
+};
+module_platform_driver(tegra_ahci_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("Tegra124 AHCI SATA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index ee3a365..bc28111 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -67,6 +67,9 @@
 #define PORTAXICFG			0x000000bc
 #define PORTAXICFG_OUTTRANS_SET(dst, src) \
 		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+#define PORTRANSCFG			0x000000c8
+#define PORTRANSCFG_RXWM_SET(dst, src)		\
+		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
 
 /* SATA host controller AXI CSR */
 #define INT_SLV_TMOMASK			0x00000010
@@ -193,11 +196,11 @@
 	/* Disable fix rate */
 	writel(0x0001fffe, mmio + PORTPHY1CFG);
 	readl(mmio + PORTPHY1CFG); /* Force a barrier */
-	writel(0x5018461c, mmio + PORTPHY2CFG);
+	writel(0x28183219, mmio + PORTPHY2CFG);
 	readl(mmio + PORTPHY2CFG); /* Force a barrier */
-	writel(0x1c081907, mmio + PORTPHY3CFG);
+	writel(0x13081008, mmio + PORTPHY3CFG);
 	readl(mmio + PORTPHY3CFG); /* Force a barrier */
-	writel(0x1c080815, mmio + PORTPHY4CFG);
+	writel(0x00480815, mmio + PORTPHY4CFG);
 	readl(mmio + PORTPHY4CFG); /* Force a barrier */
 	/* Set window negotiation */
 	val = readl(mmio + PORTPHY5CFG);
@@ -209,6 +212,10 @@
 	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
 	writel(val, mmio + PORTAXICFG);
 	readl(mmio + PORTAXICFG); /* Force a barrier */
+	/* Set the watermark threshold of the receive FIFO */
+	val = readl(mmio + PORTRANSCFG);
+	val = PORTRANSCFG_RXWM_SET(val, 0x30);
+	writel(val, mmio + PORTRANSCFG);
 }
 
 /**
@@ -415,7 +422,6 @@
 	struct ahci_host_priv *hpriv;
 	struct xgene_ahci_context *ctx;
 	struct resource *res;
-	unsigned long hflags;
 	int rc;
 
 	hpriv = ahci_platform_get_resources(pdev);
@@ -474,20 +480,9 @@
 	/* Configure the host controller */
 	xgene_ahci_hw_init(hpriv);
 
-	/*
-	 * Setup DMA mask. This is preliminary until the DMA range is sorted
-	 * out.
-	 */
-	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
-	if (rc) {
-		dev_err(dev, "Unable to set dma mask\n");
-		goto disable_resources;
-	}
+	hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
 
-	hflags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
-
-	rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info,
-				     hflags, 0, 0);
+	rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
 	if (rc)
 		goto disable_resources;
 
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d72ce04..b784e9d 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -382,8 +382,6 @@
  *	ahci_save_initial_config - Save and fixup initial config values
  *	@dev: target AHCI device
  *	@hpriv: host private area to store config values
- *	@force_port_map: force port map to a specified value
- *	@mask_port_map: mask out particular bits from port map
  *
  *	Some registers containing configuration info might be setup by
  *	BIOS and might be cleared on reset.  This function saves the
@@ -398,10 +396,7 @@
  *	LOCKING:
  *	None.
  */
-void ahci_save_initial_config(struct device *dev,
-			      struct ahci_host_priv *hpriv,
-			      unsigned int force_port_map,
-			      unsigned int mask_port_map)
+void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 {
 	void __iomem *mmio = hpriv->mmio;
 	u32 cap, cap2, vers, port_map;
@@ -468,17 +463,17 @@
 		cap &= ~HOST_CAP_FBS;
 	}
 
-	if (force_port_map && port_map != force_port_map) {
+	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
-			 port_map, force_port_map);
-		port_map = force_port_map;
+			 port_map, hpriv->force_port_map);
+		port_map = hpriv->force_port_map;
 	}
 
-	if (mask_port_map) {
+	if (hpriv->mask_port_map) {
 		dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
 			port_map,
-			port_map & mask_port_map);
-		port_map &= mask_port_map;
+			port_map & hpriv->mask_port_map);
+		port_map &= hpriv->mask_port_map;
 	}
 
 	/* cross check port_map and cap.n_ports */
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index b0077589..5b92c29 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -39,6 +39,67 @@
 };
 
 /**
+ * ahci_platform_enable_phys - Enable PHYs
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the PHYs found in hpriv->phys, if any.
+ * If a PHY fails to be enabled, it disables all the PHYs already
+ * enabled in reverse order and returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+{
+	int rc, i;
+
+	for (i = 0; i < hpriv->nports; i++) {
+		if (!hpriv->phys[i])
+			continue;
+
+		rc = phy_init(hpriv->phys[i]);
+		if (rc)
+			goto disable_phys;
+
+		rc = phy_power_on(hpriv->phys[i]);
+		if (rc) {
+			phy_exit(hpriv->phys[i]);
+			goto disable_phys;
+		}
+	}
+
+	return 0;
+
+disable_phys:
+	while (--i >= 0) {
+		phy_power_off(hpriv->phys[i]);
+		phy_exit(hpriv->phys[i]);
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
+
+/**
+ * ahci_platform_disable_phys - Disable PHYs
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all PHYs found in hpriv->phys.
+ */
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+{
+	int i;
+
+	for (i = 0; i < hpriv->nports; i++) {
+		if (!hpriv->phys[i])
+			continue;
+
+		phy_power_off(hpriv->phys[i]);
+		phy_exit(hpriv->phys[i]);
+	}
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
+
+/**
  * ahci_platform_enable_clks - Enable platform clocks
  * @hpriv: host private area to store config values
  *
@@ -92,7 +153,7 @@
  * following order:
  * 1) Regulator
  * 2) Clocks (through ahci_platform_enable_clks)
- * 3) Phy
+ * 3) Phys
  *
  * If resource enabling fails at any point the previous enabled resources
  * are disabled in reverse order.
@@ -114,17 +175,9 @@
 	if (rc)
 		goto disable_regulator;
 
-	if (hpriv->phy) {
-		rc = phy_init(hpriv->phy);
-		if (rc)
-			goto disable_clks;
-
-		rc = phy_power_on(hpriv->phy);
-		if (rc) {
-			phy_exit(hpriv->phy);
-			goto disable_clks;
-		}
-	}
+	rc = ahci_platform_enable_phys(hpriv);
+	if (rc)
+		goto disable_clks;
 
 	return 0;
 
@@ -144,16 +197,13 @@
  *
  * This function disables all ahci_platform managed resources in the
  * following order:
- * 1) Phy
+ * 1) Phys
  * 2) Clocks (through ahci_platform_disable_clks)
  * 3) Regulator
  */
 void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
 {
-	if (hpriv->phy) {
-		phy_power_off(hpriv->phy);
-		phy_exit(hpriv->phy);
-	}
+	ahci_platform_disable_phys(hpriv);
 
 	ahci_platform_disable_clks(hpriv);
 
@@ -187,7 +237,7 @@
  * 2) regulator for controlling the target's power (optional)
  * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
  *    or for non devicetree enabled platforms a single clock
- *	4) phy (optional)
+ *	4) phys (optional)
  *
  * RETURNS:
  * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
@@ -197,7 +247,9 @@
 	struct device *dev = &pdev->dev;
 	struct ahci_host_priv *hpriv;
 	struct clk *clk;
-	int i, rc = -ENOMEM;
+	struct device_node *child;
+	int i, enabled_ports = 0, rc = -ENOMEM;
+	u32 mask_port_map = 0;
 
 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 		return ERR_PTR(-ENOMEM);
@@ -246,28 +298,89 @@
 		hpriv->clks[i] = clk;
 	}
 
-	hpriv->phy = devm_phy_get(dev, "sata-phy");
-	if (IS_ERR(hpriv->phy)) {
-		rc = PTR_ERR(hpriv->phy);
-		switch (rc) {
-		case -ENOSYS:
-			/* No PHY support. Check if PHY is required. */
-			if (of_find_property(dev->of_node, "phys", NULL)) {
-				dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
+	hpriv->nports = of_get_child_count(dev->of_node);
+
+	if (hpriv->nports) {
+		hpriv->phys = devm_kzalloc(dev,
+					   hpriv->nports * sizeof(*hpriv->phys),
+					   GFP_KERNEL);
+		if (!hpriv->phys) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
+		for_each_child_of_node(dev->of_node, child) {
+			u32 port;
+
+			if (!of_device_is_available(child))
+				continue;
+
+			if (of_property_read_u32(child, "reg", &port)) {
+				rc = -EINVAL;
 				goto err_out;
 			}
-		case -ENODEV:
-			/* continue normally */
-			hpriv->phy = NULL;
-			break;
 
-		case -EPROBE_DEFER:
-			goto err_out;
+			if (port >= hpriv->nports) {
+				dev_warn(dev, "invalid port number %u\n", port);
+				continue;
+			}
 
-		default:
-			dev_err(dev, "couldn't get sata-phy\n");
+			mask_port_map |= BIT(port);
+
+			hpriv->phys[port] = devm_of_phy_get(dev, child, NULL);
+			if (IS_ERR(hpriv->phys[port])) {
+				rc = PTR_ERR(hpriv->phys[port]);
+				dev_err(dev,
+					"couldn't get PHY in node %s: %d\n",
+					child->name, rc);
+				goto err_out;
+			}
+
+			enabled_ports++;
+		}
+		if (!enabled_ports) {
+			dev_warn(dev, "No port enabled\n");
+			rc = -ENODEV;
 			goto err_out;
 		}
+
+		if (!hpriv->mask_port_map)
+			hpriv->mask_port_map = mask_port_map;
+	} else {
+		/*
+		 * If no sub-node was found, keep this for device tree
+		 * compatibility
+		 */
+		struct phy *phy = devm_phy_get(dev, "sata-phy");
+		if (!IS_ERR(phy)) {
+			hpriv->phys = devm_kzalloc(dev, sizeof(*hpriv->phys),
+						   GFP_KERNEL);
+			if (!hpriv->phys) {
+				rc = -ENOMEM;
+				goto err_out;
+			}
+
+			hpriv->phys[0] = phy;
+			hpriv->nports = 1;
+		} else {
+			rc = PTR_ERR(phy);
+			switch (rc) {
+			case -ENOSYS:
+				/* No PHY support. Check if PHY is required. */
+				if (of_find_property(dev->of_node, "phys", NULL)) {
+					dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
+					goto err_out;
+				}
+				/* fall through */
+			case -ENODEV:
+				/* continue normally */
+				hpriv->phys = NULL;
+				break;
+
+			default:
+				goto err_out;
+			}
+		}
 	}
 
 	pm_runtime_enable(dev);
@@ -288,12 +401,9 @@
  * @pdev: platform device pointer for the host
  * @hpriv: ahci-host private data for the host
  * @pi_template: template for the ata_port_info to use
- * @host_flags: ahci host flags used in ahci_host_priv
- * @force_port_map: param passed to ahci_save_initial_config
- * @mask_port_map: param passed to ahci_save_initial_config
  *
  * This function does all the usual steps needed to bring up an
- * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
+ * ahci-platform host; note any necessary resources (i.e. clks, phys, etc.)
  * must be initialized / enabled before calling this.
  *
  * RETURNS:
@@ -301,10 +411,7 @@
  */
 int ahci_platform_init_host(struct platform_device *pdev,
 			    struct ahci_host_priv *hpriv,
-			    const struct ata_port_info *pi_template,
-			    unsigned long host_flags,
-			    unsigned int force_port_map,
-			    unsigned int mask_port_map)
+			    const struct ata_port_info *pi_template)
 {
 	struct device *dev = &pdev->dev;
 	struct ata_port_info pi = *pi_template;
@@ -319,10 +426,9 @@
 	}
 
 	/* prepare host */
-	pi.private_data = (void *)host_flags;
-	hpriv->flags |= host_flags;
+	pi.private_data = (void *)(unsigned long)hpriv->flags;
 
-	ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
+	ahci_save_initial_config(dev, hpriv);
 
 	if (hpriv->cap & HOST_CAP_NCQ)
 		pi.flags |= ATA_FLAG_NCQ;
@@ -369,6 +475,19 @@
 			ap->ops = &ata_dummy_port_ops;
 	}
 
+	if (hpriv->cap & HOST_CAP_64) {
+		rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_coerce_mask_and_coherent(dev,
+							  DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(dev, "Failed to enable 64-bit or 32-bit DMA.\n");
+				return rc;
+			}
+			dev_warn(dev, "Enabling 32-bit DMA instead of 64-bit.\n");
+		}
+	}
+
 	rc = ahci_reset_controller(host);
 	if (rc)
 		return rc;
@@ -399,7 +518,7 @@
  * @dev: device pointer for the host
  *
  * This function does all the usual steps needed to suspend an
- * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
+ * ahci-platform host; note any necessary resources (i.e. clks, phys, etc.)
  * must be disabled after calling this.
  *
  * RETURNS:
@@ -436,7 +555,7 @@
  * @dev: device pointer for the host
  *
  * This function does all the usual steps needed to resume an ahci-platform
- * host, note any necessary resources (ie clks, phy, etc.)  must be
+ * host; note any necessary resources (i.e. clks, phys, etc.) must be
  * initialized / enabled before calling this.
  *
  * RETURNS:
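
A driver that manages its resources by hand rather than through ahci_platform_enable_resources() can pair the new PHY helpers directly; a sketch, with a hypothetical unwind label:

	rc = ahci_platform_enable_phys(hpriv);	/* phy_init() + phy_power_on() per port */
	if (rc)
		goto disable_clks;		/* hypothetical error path */

and on teardown:

	ahci_platform_disable_phys(hpriv);	/* phy_power_off() + phy_exit() per port */
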
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index fb52883..2578fc1 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -54,7 +54,6 @@
 
 enum s3c_cpu_type {
 	TYPE_S3C64XX,
-	TYPE_S5PC100,
 	TYPE_S5PV210,
 };
 
@@ -476,10 +475,6 @@
 		writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
 		break;
 
-	case TYPE_S5PC100:
-		pata_s3c_cfg_mode(info->sfr_addr);
-		/* FALLTHROUGH */
-
 	case TYPE_S5PV210:
 		/* Configure as little endian */
 		pata_s3c_set_endian(info->ide_addr, 0);
@@ -549,11 +544,6 @@
 		info->sfr_addr = info->ide_addr + 0x1800;
 		info->ide_addr += 0x1900;
 		info->fifo_status_reg = 0x94;
-	} else if (cpu_type == TYPE_S5PC100) {
-		ap->ops = &pata_s5p_port_ops;
-		info->sfr_addr = info->ide_addr + 0x1800;
-		info->ide_addr += 0x1900;
-		info->fifo_status_reg = 0x84;
 	} else {
 		ap->ops = &pata_s5p_port_ops;
 		info->fifo_status_reg = 0x84;
@@ -653,9 +643,6 @@
 		.name		= "s3c64xx-pata",
 		.driver_data	= TYPE_S3C64XX,
 	}, {
-		.name		= "s5pc100-pata",
-		.driver_data	= TYPE_S5PC100,
-	}, {
 		.name		= "s5pv210-pata",
 		.driver_data	= TYPE_S5PV210,
 	},
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 616a6d2..07bc7e4 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -734,13 +734,12 @@
 	if (!pp)
 		return -ENOMEM;
 
-	mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
-				 GFP_KERNEL);
+	mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+				  GFP_KERNEL);
 	if (!mem) {
 		kfree(pp);
 		return -ENOMEM;
 	}
-	memset(mem, 0, SATA_FSL_PORT_PRIV_DMA_SZ);
 
 	pp->cmdslot = mem;
 	pp->cmdslot_paddr = mem_dma;
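
For reference, dma_zalloc_coherent() is the zeroing wrapper around dma_alloc_coherent(), which is why the explicit memset() can be dropped; it behaves essentially as:

	void *mem = dma_alloc_coherent(dev, size, &dma_handle, flag);
	if (mem)
		memset(mem, 0, size);
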
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 65965cf..da3bc27 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -512,7 +512,7 @@
 		return rc;
 
 
-	ahci_save_initial_config(dev, hpriv, 0, 0);
+	ahci_save_initial_config(dev, hpriv);
 
 	/* prepare host */
 	if (hpriv->cap & HOST_CAP_NCQ)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 0534890..d81b20d 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1154,8 +1154,8 @@
 	status = readl(host_base + HOST_IRQ_STAT);
 
 	if (status == 0xffffffff) {
-		printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
-		       "PCI fault or device removal?\n");
+		dev_err(host->dev, "IRQ status == 0xffffffff, "
+			"PCI fault or device removal?\n");
 		goto out;
 	}
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index eee48c4..00f2208 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -23,6 +23,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/idr.h>
 #include <linux/acpi.h>
+#include <linux/clk/clk-conf.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -499,6 +500,10 @@
 	struct platform_device *dev = to_platform_device(_dev);
 	int ret;
 
+	ret = of_clk_set_defaults(_dev->of_node, false);
+	if (ret < 0)
+		return ret;
+
 	acpi_dev_pm_attach(_dev, true);
 
 	ret = drv->probe(dev);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 9f9c5ae..cfd3af7 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -102,6 +102,13 @@
          Supports clock drivers for Keystone based SOCs. These SOCs have a local
	  power sleep control module that gates the clocks to the IPs and PLLs.
 
+config COMMON_CLK_PALMAS
+	tristate "Clock driver for TI Palmas devices"
+	depends on MFD_PALMAS
+	---help---
+	  This driver supports the 32KHz clock outputs, KG and KG_AUDIO, of
+	  TI Palmas devices using the common clock framework.
+
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 567f102..f537a0b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -9,12 +9,16 @@
 obj-$(CONFIG_COMMON_CLK)	+= clk-mux.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-composite.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fractional-divider.o
+ifeq ($(CONFIG_OF), y)
+obj-$(CONFIG_COMMON_CLK)	+= clk-conf.o
+endif
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file/directory path name
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)	+= clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)		+= clk-axm5516.o
 obj-$(CONFIG_ARCH_BCM2835)		+= clk-bcm2835.o
+obj-$(CONFIG_ARCH_CLPS711X)		+= clk-clps711x.o
 obj-$(CONFIG_ARCH_EFM32)		+= clk-efm32gg.o
 obj-$(CONFIG_ARCH_HIGHBANK)		+= clk-highbank.o
 obj-$(CONFIG_MACH_LOONGSON1)		+= clk-ls1x.o
@@ -22,6 +26,7 @@
 obj-$(CONFIG_ARCH_MOXART)		+= clk-moxart.o
 obj-$(CONFIG_ARCH_NOMADIK)		+= clk-nomadik.o
 obj-$(CONFIG_ARCH_NSPIRE)		+= clk-nspire.o
+obj-$(CONFIG_COMMON_CLK_PALMAS)		+= clk-palmas.o
 obj-$(CONFIG_CLK_PPC_CORENET)		+= clk-ppc-corenet.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)	+= clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)		+= clk-si5351.o
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 7333061..59fa3cc 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -388,6 +388,7 @@
 	if (parent_rate)
 		return parent_rate;
 
+	pr_warn("Main crystal frequency not set, using approximate value\n");
 	tmp = pmc_read(pmc, AT91_CKGR_MCFR);
 	if (!(tmp & AT91_PMC_MAINRDY))
 		return 0;
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
new file mode 100644
index 0000000..715eec1
--- /dev/null
+++ b/drivers/clk/clk-clps711x.c
@@ -0,0 +1,192 @@
+/*
+ *  Cirrus Logic CLPS711X CLK driver
+ *
+ *  Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon/clps711x.h>
+
+#include <dt-bindings/clock/clps711x-clock.h>
+
+#define CLPS711X_SYSCON1	(0x0100)
+#define CLPS711X_SYSCON2	(0x1100)
+#define CLPS711X_SYSFLG2	(CLPS711X_SYSCON2 + SYSFLG_OFFSET)
+#define CLPS711X_PLLR		(0xa5a8)
+
+#define CLPS711X_EXT_FREQ	(13000000)
+#define CLPS711X_OSC_FREQ	(3686400)
+
+static const struct clk_div_table spi_div_table[] = {
+	{ .val = 0, .div = 32, },
+	{ .val = 1, .div = 8, },
+	{ .val = 2, .div = 2, },
+	{ .val = 3, .div = 1, },
+	{ /* sentinel */ }
+};
+
+static const struct clk_div_table timer_div_table[] = {
+	{ .val = 0, .div = 256, },
+	{ .val = 1, .div = 1, },
+	{ /* sentinel */ }
+};
+
+struct clps711x_clk {
+	struct clk_onecell_data	clk_data;
+	spinlock_t		lock;
+	struct clk		*clks[CLPS711X_CLK_MAX];
+};
+
+static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
+						       u32 fref)
+{
+	u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi;
+	struct clps711x_clk *clps711x_clk;
+	unsigned i;
+
+	if (!base)
+		return ERR_PTR(-ENOMEM);
+
+	clps711x_clk = kzalloc(sizeof(*clps711x_clk), GFP_KERNEL);
+	if (!clps711x_clk)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&clps711x_clk->lock);
+
+	/* Read PLL multiplier value and sanity check */
+	tmp = readl(base + CLPS711X_PLLR) >> 24;
+	if (((tmp >= 10) && (tmp <= 50)) || !fref)
+		f_pll = DIV_ROUND_UP(CLPS711X_OSC_FREQ * tmp, 2);
+	else
+		f_pll = fref;
+
+	tmp = readl(base + CLPS711X_SYSFLG2);
+	if (tmp & SYSFLG2_CKMODE) {
+		f_cpu = CLPS711X_EXT_FREQ;
+		f_bus = CLPS711X_EXT_FREQ;
+		f_spi = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 96);
+		f_pll = 0;
+		f_pwm = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 128);
+	} else {
+		f_cpu = f_pll;
+		if (f_cpu > 36864000)
+			f_bus = DIV_ROUND_UP(f_cpu, 2);
+		else
+			f_bus = 36864000 / 2;
+		f_spi = DIV_ROUND_CLOSEST(f_cpu, 576);
+		f_pwm = DIV_ROUND_CLOSEST(f_cpu, 768);
+	}
+
+	if (tmp & SYSFLG2_CKMODE) {
+		if (readl(base + CLPS711X_SYSCON2) & SYSCON2_OSTB)
+			f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 26);
+		else
+			f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 24);
+	} else
+		f_tim = DIV_ROUND_CLOSEST(f_cpu, 144);
+
+	tmp = readl(base + CLPS711X_SYSCON1);
+	/*
+	 * Timer1 in free running mode.
+	 * Counter will wrap around to 0xffff when it underflows
+	 * and will continue to count down.
+	 */
+	tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
+	/*
+	 * Timer2 in prescale mode.
+	 * Value written is automatically re-loaded when
+	 * the counter underflows.
+	 */
+	tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
+	writel(tmp, base + CLPS711X_SYSCON1);
+
+	clps711x_clk->clks[CLPS711X_CLK_DUMMY] =
+		clk_register_fixed_rate(NULL, "dummy", NULL, CLK_IS_ROOT, 0);
+	clps711x_clk->clks[CLPS711X_CLK_CPU] =
+		clk_register_fixed_rate(NULL, "cpu", NULL, CLK_IS_ROOT, f_cpu);
+	clps711x_clk->clks[CLPS711X_CLK_BUS] =
+		clk_register_fixed_rate(NULL, "bus", NULL, CLK_IS_ROOT, f_bus);
+	clps711x_clk->clks[CLPS711X_CLK_PLL] =
+		clk_register_fixed_rate(NULL, "pll", NULL, CLK_IS_ROOT, f_pll);
+	clps711x_clk->clks[CLPS711X_CLK_TIMERREF] =
+		clk_register_fixed_rate(NULL, "timer_ref", NULL, CLK_IS_ROOT,
+					f_tim);
+	clps711x_clk->clks[CLPS711X_CLK_TIMER1] =
+		clk_register_divider_table(NULL, "timer1", "timer_ref", 0,
+					   base + CLPS711X_SYSCON1, 5, 1, 0,
+					   timer_div_table, &clps711x_clk->lock);
+	clps711x_clk->clks[CLPS711X_CLK_TIMER2] =
+		clk_register_divider_table(NULL, "timer2", "timer_ref", 0,
+					   base + CLPS711X_SYSCON1, 7, 1, 0,
+					   timer_div_table, &clps711x_clk->lock);
+	clps711x_clk->clks[CLPS711X_CLK_PWM] =
+		clk_register_fixed_rate(NULL, "pwm", NULL, CLK_IS_ROOT, f_pwm);
+	clps711x_clk->clks[CLPS711X_CLK_SPIREF] =
+		clk_register_fixed_rate(NULL, "spi_ref", NULL, CLK_IS_ROOT,
+					f_spi);
+	clps711x_clk->clks[CLPS711X_CLK_SPI] =
+		clk_register_divider_table(NULL, "spi", "spi_ref", 0,
+					   base + CLPS711X_SYSCON1, 16, 2, 0,
+					   spi_div_table, &clps711x_clk->lock);
+	clps711x_clk->clks[CLPS711X_CLK_UART] =
+		clk_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
+	clps711x_clk->clks[CLPS711X_CLK_TICK] =
+		clk_register_fixed_rate(NULL, "tick", NULL, CLK_IS_ROOT, 64);
+
+	for (i = 0; i < CLPS711X_CLK_MAX; i++)
+		if (IS_ERR(clps711x_clk->clks[i]))
+			pr_err("clk %i: register failed with %ld\n",
+			       i, PTR_ERR(clps711x_clk->clks[i]));
+
+	return clps711x_clk;
+}
+
+void __init clps711x_clk_init(void __iomem *base)
+{
+	struct clps711x_clk *clps711x_clk;
+
+	clps711x_clk = _clps711x_clk_init(base, 73728000);
+
+	BUG_ON(IS_ERR(clps711x_clk));
+
+	/* Clocksource */
+	clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_TIMER1],
+			    NULL, "clps711x-timer.0");
+	clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_TIMER2],
+			    NULL, "clps711x-timer.1");
+
+	/* Drivers */
+	clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_PWM],
+			    NULL, "clps711x-pwm");
+	clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_UART],
+			    NULL, "clps711x-uart.0");
+	clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_UART],
+			    NULL, "clps711x-uart.1");
+}
+
+#ifdef CONFIG_OF
+static void __init clps711x_clk_init_dt(struct device_node *np)
+{
+	void __iomem *base = of_iomap(np, 0);
+	struct clps711x_clk *clps711x_clk;
+	u32 fref = 0;
+
+	WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
+
+	clps711x_clk = _clps711x_clk_init(base, fref);
+	BUG_ON(IS_ERR(clps711x_clk));
+
+	clps711x_clk->clk_data.clks = clps711x_clk->clks;
+	clps711x_clk->clk_data.clk_num = CLPS711X_CLK_MAX;
+	of_clk_add_provider(np, of_clk_src_onecell_get,
+			    &clps711x_clk->clk_data);
+}
+CLK_OF_DECLARE(clps711x, "cirrus,clps711x-clk", clps711x_clk_init_dt);
+#endif
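
For reference, the framework resolves a table-based divider by matching the raw register field against the {val, div} pairs, so for the SPI clock the 2-bit field at SYSCON1[17:16] selects a divisor of 32, 8, 2 or 1. A sketch of the lookup, assuming val holds that field:

	const struct clk_div_table *clkt;

	for (clkt = spi_div_table; clkt->div; clkt++)	/* stops at the sentinel */
		if (clkt->val == val)
			return DIV_ROUND_UP(parent_rate, clkt->div);

The loop terminates on the zero sentinel entry, which is why the tables above must end with one.
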
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 57a078e..b9355da 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -64,11 +64,56 @@
 	const struct clk_ops *mux_ops = composite->mux_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 	struct clk_hw *mux_hw = composite->mux_hw;
+	struct clk *parent;
+	unsigned long parent_rate;
+	long tmp_rate, best_rate = 0;
+	unsigned long rate_diff;
+	unsigned long best_rate_diff = ULONG_MAX;
+	int i;
 
 	if (rate_hw && rate_ops && rate_ops->determine_rate) {
 		rate_hw->clk = hw->clk;
 		return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
 						best_parent_p);
+	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
+		   mux_hw && mux_ops && mux_ops->set_parent) {
+		*best_parent_p = NULL;
+
+		if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
+			*best_parent_p = clk_get_parent(mux_hw->clk);
+			*best_parent_rate = __clk_get_rate(*best_parent_p);
+
+			return rate_ops->round_rate(rate_hw, rate,
+						    best_parent_rate);
+		}
+
+		for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
+			parent = clk_get_parent_by_index(mux_hw->clk, i);
+			if (!parent)
+				continue;
+
+			parent_rate = __clk_get_rate(parent);
+
+			tmp_rate = rate_ops->round_rate(rate_hw, rate,
+							&parent_rate);
+			if (tmp_rate < 0)
+				continue;
+
+			rate_diff = abs(rate - tmp_rate);
+
+			if (!rate_diff || !*best_parent_p ||
+			    best_rate_diff > rate_diff) {
+				*best_parent_p = parent;
+				*best_parent_rate = parent_rate;
+				best_rate_diff = rate_diff;
+				best_rate = tmp_rate;
+			}
+
+			if (!rate_diff)
+				return rate;
+		}
+
+		return best_rate;
 	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
 		mux_hw->clk = hw->clk;
 		return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
@@ -162,7 +207,7 @@
 	clk_composite_ops = &composite->ops;
 
 	if (mux_hw && mux_ops) {
-		if (!mux_ops->get_parent || !mux_ops->set_parent) {
+		if (!mux_ops->get_parent) {
 			clk = ERR_PTR(-EINVAL);
 			goto err;
 		}
@@ -170,7 +215,8 @@
 		composite->mux_hw = mux_hw;
 		composite->mux_ops = mux_ops;
 		clk_composite_ops->get_parent = clk_composite_get_parent;
-		clk_composite_ops->set_parent = clk_composite_set_parent;
+		if (mux_ops->set_parent)
+			clk_composite_ops->set_parent = clk_composite_set_parent;
 		if (mux_ops->determine_rate)
 			clk_composite_ops->determine_rate = clk_composite_determine_rate;
 	}
@@ -180,24 +226,27 @@
 			clk = ERR_PTR(-EINVAL);
 			goto err;
 		}
+		clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
 
-		/* .round_rate is a prerequisite for .set_rate */
-		if (rate_ops->round_rate) {
-			clk_composite_ops->round_rate = clk_composite_round_rate;
-			if (rate_ops->set_rate) {
-				clk_composite_ops->set_rate = clk_composite_set_rate;
-			}
-		} else {
-			WARN(rate_ops->set_rate,
-				"%s: missing round_rate op is required\n",
-				__func__);
+		if (rate_ops->determine_rate)
+			clk_composite_ops->determine_rate =
+				clk_composite_determine_rate;
+		else if (rate_ops->round_rate)
+			clk_composite_ops->round_rate =
+				clk_composite_round_rate;
+
+		/* .set_rate requires either .round_rate or .determine_rate */
+		if (rate_ops->set_rate) {
+			if (rate_ops->determine_rate || rate_ops->round_rate)
+				clk_composite_ops->set_rate =
+						clk_composite_set_rate;
+			else
+				WARN(1, "%s: missing round_rate op is required\n",
+						__func__);
 		}
 
 		composite->rate_hw = rate_hw;
 		composite->rate_ops = rate_ops;
-		clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
-		if (rate_ops->determine_rate)
-			clk_composite_ops->determine_rate = clk_composite_determine_rate;
 	}
 
 	if (gate_hw && gate_ops) {
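
One consequence of making .set_parent optional is that a composite clock can now wrap a read-only mux, where only .get_parent is implemented. A sketch, assuming mux and div are already-initialized clk_mux and clk_divider descriptors:

	/* parent selection is reported but never changed; rate comes from div */
	clk = clk_register_composite(NULL, "periph", parent_names, num_parents,
				     &mux->hw, &clk_mux_ro_ops,
				     &div->hw, &clk_divider_ops,
				     NULL, NULL, 0);
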
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
new file mode 100644
index 0000000..aad4796
--- /dev/null
+++ b/drivers/clk/clk-conf.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include "clk.h"
+
+static int __set_clk_parents(struct device_node *node, bool clk_supplier)
+{
+	struct of_phandle_args clkspec;
+	int index, rc, num_parents;
+	struct clk *clk, *pclk;
+
+	num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
+						 "#clock-cells");
+	if (num_parents == -EINVAL)
+		pr_err("clk: invalid value of 'assigned-clock-parents' property at %s\n",
+		       node->full_name);
+
+	for (index = 0; index < num_parents; index++) {
+		rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
+					"#clock-cells",	index, &clkspec);
+		if (rc < 0) {
+			/* skip empty (null) phandles */
+			if (rc == -ENOENT)
+				continue;
+			else
+				return rc;
+		}
+		if (clkspec.np == node && !clk_supplier)
+			return 0;
+		pclk = of_clk_get_by_clkspec(&clkspec);
+		if (IS_ERR(pclk)) {
+			pr_warn("clk: couldn't get parent clock %d for %s\n",
+				index, node->full_name);
+			return PTR_ERR(pclk);
+		}
+
+		rc = of_parse_phandle_with_args(node, "assigned-clocks",
+					"#clock-cells", index, &clkspec);
+		if (rc < 0)
+			goto err;
+		if (clkspec.np == node && !clk_supplier) {
+			rc = 0;
+			goto err;
+		}
+		clk = of_clk_get_by_clkspec(&clkspec);
+		if (IS_ERR(clk)) {
+			pr_warn("clk: couldn't get assigned clock %d for %s\n",
+				index, node->full_name);
+			rc = PTR_ERR(clk);
+			goto err;
+		}
+
+		rc = clk_set_parent(clk, pclk);
+		if (rc < 0)
+			pr_err("clk: failed to reparent %s to %s: %d\n",
+			       __clk_get_name(clk), __clk_get_name(pclk), rc);
+		clk_put(clk);
+		clk_put(pclk);
+	}
+	return 0;
+err:
+	clk_put(pclk);
+	return rc;
+}
+
+static int __set_clk_rates(struct device_node *node, bool clk_supplier)
+{
+	struct of_phandle_args clkspec;
+	struct property	*prop;
+	const __be32 *cur;
+	int rc, index = 0;
+	struct clk *clk;
+	u32 rate;
+
+	of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) {
+		if (rate) {
+			rc = of_parse_phandle_with_args(node, "assigned-clocks",
+					"#clock-cells",	index, &clkspec);
+			if (rc < 0) {
+				/* skip empty (null) phandles */
+				if (rc == -ENOENT)
+					continue;
+				else
+					return rc;
+			}
+			if (clkspec.np == node && !clk_supplier)
+				return 0;
+
+			clk = of_clk_get_by_clkspec(&clkspec);
+			if (IS_ERR(clk)) {
+				pr_warn("clk: couldn't get clock %d for %s\n",
+					index, node->full_name);
+				return PTR_ERR(clk);
+			}
+
+			rc = clk_set_rate(clk, rate);
+			if (rc < 0)
+				pr_err("clk: couldn't set %s clock rate: %d\n",
+				       __clk_get_name(clk), rc);
+			clk_put(clk);
+		}
+		index++;
+	}
+	return 0;
+}
+
+/**
+ * of_clk_set_defaults() - parse and set assigned clocks configuration
+ * @node: device node to apply clock settings for
+ * @clk_supplier: true if clocks supplied by @node should also be considered
+ *
+ * This function parses 'assigned-{clocks/clock-parents/clock-rates}' properties
+ * and sets any specified clock parents and rates. The @clk_supplier argument
+ * should be set to true if @node may also be a clock supplier of any clock
+ * listed in its 'assigned-clocks' or 'assigned-clock-parents' properties.
+ * If @clk_supplier is false, the function returns 0 as soon as it determines
+ * that @node is itself a supplier of any of the clocks.
+ */
+int of_clk_set_defaults(struct device_node *node, bool clk_supplier)
+{
+	int rc;
+
+	if (!node)
+		return 0;
+
+	rc = __set_clk_parents(node, clk_supplier);
+	if (rc < 0)
+		return rc;
+
+	return __set_clk_rates(node, clk_supplier);
+}
+EXPORT_SYMBOL_GPL(of_clk_set_defaults);
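
The parsed properties are 'assigned-clocks' together with the parallel 'assigned-clock-parents' and 'assigned-clock-rates' arrays. Bus code other than the platform bus (which now calls this from its probe path) can apply the same defaults itself; a hedged sketch:

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret < 0)
		return ret;	/* may be -EPROBE_DEFER if a provider is missing */
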
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
new file mode 100644
index 0000000..781630e
--- /dev/null
+++ b/drivers/clk/clk-palmas.c
@@ -0,0 +1,307 @@
+/*
+ * Clock driver for Palmas device.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (c) 2013-2014 Texas Instruments, Inc.
+ *
+ * Author:	Laxman Dewangan <ldewangan@nvidia.com>
+ *		Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/palmas.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE1	1
+#define PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE2	2
+#define PALMAS_CLOCK_DT_EXT_CONTROL_NSLEEP	3
+
+struct palmas_clk32k_desc {
+	const char *clk_name;
+	unsigned int control_reg;
+	unsigned int enable_mask;
+	unsigned int sleep_mask;
+	unsigned int sleep_reqstr_id;
+	int delay;
+};
+
+struct palmas_clock_info {
+	struct device *dev;
+	struct clk *clk;
+	struct clk_hw hw;
+	struct palmas *palmas;
+	struct palmas_clk32k_desc *clk_desc;
+	int ext_control_pin;
+};
+
+static inline struct palmas_clock_info *to_palmas_clks_info(struct clk_hw *hw)
+{
+	return container_of(hw, struct palmas_clock_info, hw);
+}
+
+static unsigned long palmas_clks_recalc_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	return 32768;
+}
+
+static int palmas_clks_prepare(struct clk_hw *hw)
+{
+	struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
+	int ret;
+
+	ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
+				 cinfo->clk_desc->control_reg,
+				 cinfo->clk_desc->enable_mask,
+				 cinfo->clk_desc->enable_mask);
+	if (ret < 0)
+		dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
+			cinfo->clk_desc->control_reg, ret);
+	else if (cinfo->clk_desc->delay)
+		udelay(cinfo->clk_desc->delay);
+
+	return ret;
+}
+
+static void palmas_clks_unprepare(struct clk_hw *hw)
+{
+	struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
+	int ret;
+
+	/*
+	 * Clock can be disabled through external pin if it is externally
+	 * controlled.
+	 */
+	if (cinfo->ext_control_pin)
+		return;
+
+	ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
+				 cinfo->clk_desc->control_reg,
+				 cinfo->clk_desc->enable_mask, 0);
+	if (ret < 0)
+		dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
+			cinfo->clk_desc->control_reg, ret);
+}
+
+static int palmas_clks_is_prepared(struct clk_hw *hw)
+{
+	struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
+	int ret;
+	u32 val;
+
+	if (cinfo->ext_control_pin)
+		return 1;
+
+	ret = palmas_read(cinfo->palmas, PALMAS_RESOURCE_BASE,
+			  cinfo->clk_desc->control_reg, &val);
+	if (ret < 0) {
+		dev_err(cinfo->dev, "Reg 0x%02x read failed, %d\n",
+			cinfo->clk_desc->control_reg, ret);
+		return ret;
+	}
+	return !!(val & cinfo->clk_desc->enable_mask);
+}
+
+static struct clk_ops palmas_clks_ops = {
+	.prepare	= palmas_clks_prepare,
+	.unprepare	= palmas_clks_unprepare,
+	.is_prepared	= palmas_clks_is_prepared,
+	.recalc_rate	= palmas_clks_recalc_rate,
+};
+
+struct palmas_clks_of_match_data {
+	struct clk_init_data init;
+	struct palmas_clk32k_desc desc;
+};
+
+static struct palmas_clks_of_match_data palmas_of_clk32kg = {
+	.init = {
+		.name = "clk32kg",
+		.ops = &palmas_clks_ops,
+		.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED,
+	},
+	.desc = {
+		.clk_name = "clk32kg",
+		.control_reg = PALMAS_CLK32KG_CTRL,
+		.enable_mask = PALMAS_CLK32KG_CTRL_MODE_ACTIVE,
+		.sleep_mask = PALMAS_CLK32KG_CTRL_MODE_SLEEP,
+		.sleep_reqstr_id = PALMAS_EXTERNAL_REQSTR_ID_CLK32KG,
+		.delay = 200,
+	},
+};
+
+static struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
+	.init = {
+		.name = "clk32kgaudio",
+		.ops = &palmas_clks_ops,
+		.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED,
+	},
+	.desc = {
+		.clk_name = "clk32kgaudio",
+		.control_reg = PALMAS_CLK32KGAUDIO_CTRL,
+		.enable_mask = PALMAS_CLK32KG_CTRL_MODE_ACTIVE,
+		.sleep_mask = PALMAS_CLK32KG_CTRL_MODE_SLEEP,
+		.sleep_reqstr_id = PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO,
+		.delay = 200,
+	},
+};
+
+static struct of_device_id palmas_clks_of_match[] = {
+	{
+		.compatible = "ti,palmas-clk32kg",
+		.data = &palmas_of_clk32kg,
+	},
+	{
+		.compatible = "ti,palmas-clk32kgaudio",
+		.data = &palmas_of_clk32kgaudio,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, palmas_clks_of_match);
+
+static void palmas_clks_get_clk_data(struct platform_device *pdev,
+				     struct palmas_clock_info *cinfo)
+{
+	struct device_node *node = pdev->dev.of_node;
+	unsigned int prop;
+	int ret;
+
+	ret = of_property_read_u32(node, "ti,external-sleep-control",
+				   &prop);
+	if (ret)
+		return;
+
+	switch (prop) {
+	case PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE1:
+		prop = PALMAS_EXT_CONTROL_ENABLE1;
+		break;
+	case PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE2:
+		prop = PALMAS_EXT_CONTROL_ENABLE2;
+		break;
+	case PALMAS_CLOCK_DT_EXT_CONTROL_NSLEEP:
+		prop = PALMAS_EXT_CONTROL_NSLEEP;
+		break;
+	default:
+		dev_warn(&pdev->dev, "%s: Invalid ext control option: %u\n",
+			 node->name, prop);
+		prop = 0;
+		break;
+	}
+	cinfo->ext_control_pin = prop;
+}
+
+static int palmas_clks_init_configure(struct palmas_clock_info *cinfo)
+{
+	int ret;
+
+	ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
+				 cinfo->clk_desc->control_reg,
+				 cinfo->clk_desc->sleep_mask, 0);
+	if (ret < 0) {
+		dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
+			cinfo->clk_desc->control_reg, ret);
+		return ret;
+	}
+
+	if (cinfo->ext_control_pin) {
+		ret = clk_prepare(cinfo->clk);
+		if (ret < 0) {
+			dev_err(cinfo->dev, "Clock prep failed, %d\n", ret);
+			return ret;
+		}
+
+		ret = palmas_ext_control_req_config(cinfo->palmas,
+					cinfo->clk_desc->sleep_reqstr_id,
+					cinfo->ext_control_pin, true);
+		if (ret < 0) {
+			dev_err(cinfo->dev, "Ext config for %s failed, %d\n",
+				cinfo->clk_desc->clk_name, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int palmas_clks_probe(struct platform_device *pdev)
+{
+	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+	struct device_node *node = pdev->dev.of_node;
+	struct palmas_clks_of_match_data *match_data;
+	const struct of_device_id *match;
+	struct palmas_clock_info *cinfo;
+	struct clk *clk;
+	int ret;
+
+	match = of_match_device(palmas_clks_of_match, &pdev->dev);
+	match_data = (struct palmas_clks_of_match_data *)match->data;
+
+	cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	palmas_clks_get_clk_data(pdev, cinfo);
+	platform_set_drvdata(pdev, cinfo);
+
+	cinfo->dev = &pdev->dev;
+	cinfo->palmas = palmas;
+
+	cinfo->clk_desc = &match_data->desc;
+	cinfo->hw.init = &match_data->init;
+	clk = devm_clk_register(&pdev->dev, &cinfo->hw);
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev, "Failed to register clock %s, %d\n",
+			match_data->desc.clk_name, ret);
+		return ret;
+	}
+
+	cinfo->clk = clk;
+	ret = palmas_clks_init_configure(cinfo);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Clock config failed, %d\n", ret);
+		return ret;
+	}
+
+	ret = of_clk_add_provider(node, of_clk_src_simple_get, cinfo->clk);
+	if (ret < 0)
+		dev_err(&pdev->dev, "Failed to add clock provider, %d\n", ret);
+	return ret;
+}
+
+static int palmas_clks_remove(struct platform_device *pdev)
+{
+	of_clk_del_provider(pdev->dev.of_node);
+	return 0;
+}
+
+static struct platform_driver palmas_clks_driver = {
+	.driver = {
+		.name = "palmas-clk",
+		.owner = THIS_MODULE,
+		.of_match_table = palmas_clks_of_match,
+	},
+	.probe = palmas_clks_probe,
+	.remove = palmas_clks_remove,
+};
+
+module_platform_driver(palmas_clks_driver);
+
+MODULE_DESCRIPTION("Clock driver for Palmas Series Devices");
+MODULE_ALIAS("platform:palmas-clk");
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_LICENSE("GPL v2");
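
Since gating these 32KHz outputs means a register write over a slow bus, the driver implements .prepare/.unprepare rather than .enable/.disable; consumers just need to use the prepare-side API from sleepable context. A minimal consumer sketch:

	clk = devm_clk_get(dev, NULL);	/* resolved via the DT provider above */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* .prepare does the actual register write */
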
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index 8b284be..8e58edf 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -291,7 +291,7 @@
 	{}
 };
 
-static struct platform_driver ppc_corenet_clk_driver = {
+static struct platform_driver ppc_corenet_clk_driver __initdata = {
 	.driver = {
 		.name = "ppc_corenet_clock",
 		.owner = THIS_MODULE,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 3757e9e..b7797fb 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -46,7 +46,6 @@
 	struct clk *clk;
 	struct clk_lookup *lookup;
 	u32 mask;
-	bool enabled;
 	unsigned int reg;
 };
 
@@ -63,8 +62,6 @@
 	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
 				 s2mps11->reg,
 				 s2mps11->mask, s2mps11->mask);
-	if (!ret)
-		s2mps11->enabled = true;
 
 	return ret;
 }
@@ -76,32 +73,32 @@
 
 	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
 			   s2mps11->mask, ~s2mps11->mask);
-
-	if (!ret)
-		s2mps11->enabled = false;
 }
 
-static int s2mps11_clk_is_enabled(struct clk_hw *hw)
+static int s2mps11_clk_is_prepared(struct clk_hw *hw)
 {
+	int ret;
+	u32 val;
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
 
-	return s2mps11->enabled;
+	ret = regmap_read(s2mps11->iodev->regmap_pmic,
+				s2mps11->reg, &val);
+	if (ret < 0)
+		return -EINVAL;
+
+	return val & s2mps11->mask;
 }
 
 static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
 					     unsigned long parent_rate)
 {
-	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
-	if (s2mps11->enabled)
-		return 32768;
-	else
-		return 0;
+	return 32768;
 }
 
 static struct clk_ops s2mps11_clk_ops = {
 	.prepare	= s2mps11_clk_prepare,
 	.unprepare	= s2mps11_clk_unprepare,
-	.is_enabled	= s2mps11_clk_is_enabled,
+	.is_prepared	= s2mps11_clk_is_prepared,
 	.recalc_rate	= s2mps11_clk_recalc_rate,
 };
 
@@ -169,7 +166,6 @@
 	unsigned int s2mps11_reg;
 	struct clk_init_data *clks_init;
 	int i, ret = 0;
-	u32 val;
 
 	s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
 					S2MPS11_CLKS_NUM, GFP_KERNEL);
@@ -214,13 +210,6 @@
 		s2mps11_clk->mask = 1 << i;
 		s2mps11_clk->reg = s2mps11_reg;
 
-		ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
-				  s2mps11_clk->reg, &val);
-		if (ret < 0)
-			goto err_reg;
-
-		s2mps11_clk->enabled = val & s2mps11_clk->mask;
-
 		s2mps11_clk->clk = devm_clk_register(&pdev->dev,
 							&s2mps11_clk->hw);
 		if (IS_ERR(s2mps11_clk->clk)) {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 8b73ede..b76fa69 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk-private.h>
+#include <linux/clk/clk-conf.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -98,9 +99,19 @@
 #include <linux/debugfs.h>
 
 static struct dentry *rootdir;
-static struct dentry *orphandir;
 static int inited = 0;
 
+static struct hlist_head *all_lists[] = {
+	&clk_root_list,
+	&clk_orphan_list,
+	NULL,
+};
+
+static struct hlist_head *orphan_list[] = {
+	&clk_orphan_list,
+	NULL,
+};
+
 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
 {
 	if (!c)
@@ -130,17 +141,16 @@
 static int clk_summary_show(struct seq_file *s, void *data)
 {
 	struct clk *c;
+	struct hlist_head **lists = (struct hlist_head **)s->private;
 
 	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy\n");
 	seq_puts(s, "--------------------------------------------------------------------------------\n");
 
 	clk_prepare_lock();
 
-	hlist_for_each_entry(c, &clk_root_list, child_node)
-		clk_summary_show_subtree(s, c, 0);
-
-	hlist_for_each_entry(c, &clk_orphan_list, child_node)
-		clk_summary_show_subtree(s, c, 0);
+	for (; *lists; lists++)
+		hlist_for_each_entry(c, *lists, child_node)
+			clk_summary_show_subtree(s, c, 0);
 
 	clk_prepare_unlock();
 
@@ -193,21 +203,19 @@
 {
 	struct clk *c;
 	bool first_node = true;
+	struct hlist_head **lists = (struct hlist_head **)s->private;
 
 	seq_printf(s, "{");
 
 	clk_prepare_lock();
 
-	hlist_for_each_entry(c, &clk_root_list, child_node) {
-		if (!first_node)
-			seq_printf(s, ",");
-		first_node = false;
-		clk_dump_subtree(s, c, 0);
-	}
-
-	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
-		seq_printf(s, ",");
-		clk_dump_subtree(s, c, 0);
+	for (; *lists; lists++) {
+		hlist_for_each_entry(c, *lists, child_node) {
+			if (!first_node)
+				seq_puts(s, ",");
+			first_node = false;
+			clk_dump_subtree(s, c, 0);
+		}
 	}
 
 	clk_prepare_unlock();
@@ -276,9 +284,11 @@
 	if (!d)
 		goto err_out;
 
-	if (clk->ops->debug_init)
-		if (clk->ops->debug_init(clk->hw, clk->dentry))
+	if (clk->ops->debug_init) {
+		ret = clk->ops->debug_init(clk->hw, clk->dentry);
+		if (ret)
 			goto err_out;
+	}
 
 	ret = 0;
 	goto out;
@@ -305,7 +315,7 @@
 		goto out;
 
 	hlist_for_each_entry(child, &clk->children, child_node)
-		clk_debug_create_subtree(child, clk->dentry);
+		clk_debug_create_subtree(child, pdentry);
 
 	ret = 0;
 out:
@@ -325,31 +335,12 @@
  */
 static int clk_debug_register(struct clk *clk)
 {
-	struct clk *parent;
-	struct dentry *pdentry;
 	int ret = 0;
 
 	if (!inited)
 		goto out;
 
-	parent = clk->parent;
-
-	/*
-	 * Check to see if a clk is a root clk.  Also check that it is
-	 * safe to add this clk to debugfs
-	 */
-	if (!parent)
-		if (clk->flags & CLK_IS_ROOT)
-			pdentry = rootdir;
-		else
-			pdentry = orphandir;
-	else
-		if (parent->dentry)
-			pdentry = parent->dentry;
-		else
-			goto out;
-
-	ret = clk_debug_create_subtree(clk, pdentry);
+	ret = clk_debug_create_subtree(clk, rootdir);
 
 out:
 	return ret;
@@ -370,38 +361,17 @@
 	debugfs_remove_recursive(clk->dentry);
 }
 
-/**
- * clk_debug_reparent - reparent clk node in the debugfs clk tree
- * @clk: the clk being reparented
- * @new_parent: the new clk parent, may be NULL
- *
- * Rename clk entry in the debugfs clk tree if debugfs has been
- * initialized.  Otherwise it bails out early since the debugfs clk tree
- * will be created lazily by clk_debug_init as part of a late_initcall.
- *
- * Caller must hold prepare_lock.
- */
-static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+				void *data, const struct file_operations *fops)
 {
-	struct dentry *d;
-	struct dentry *new_parent_d;
+	struct dentry *d = NULL;
 
-	if (!inited)
-		return;
+	if (clk->dentry)
+		d = debugfs_create_file(name, mode, clk->dentry, data, fops);
 
-	if (new_parent)
-		new_parent_d = new_parent->dentry;
-	else
-		new_parent_d = orphandir;
-
-	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
-			new_parent_d, clk->name);
-	if (d)
-		clk->dentry = d;
-	else
-		pr_debug("%s: failed to rename debugfs entry for %s\n",
-				__func__, clk->name);
+	return d;
 }
+EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
 
 /**
  * clk_debug_init - lazily create the debugfs clk tree visualization
@@ -425,19 +395,24 @@
 	if (!rootdir)
 		return -ENOMEM;
 
-	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
+	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
 				&clk_summary_fops);
 	if (!d)
 		return -ENOMEM;
 
-	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
+	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
 				&clk_dump_fops);
 	if (!d)
 		return -ENOMEM;
 
-	orphandir = debugfs_create_dir("orphans", rootdir);
+	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
+				&orphan_list, &clk_summary_fops);
+	if (!d)
+		return -ENOMEM;
 
-	if (!orphandir)
+	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
+				&orphan_list, &clk_dump_fops);
+	if (!d)
 		return -ENOMEM;
 
 	clk_prepare_lock();
@@ -446,7 +421,7 @@
 		clk_debug_create_subtree(clk, rootdir);
 
 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
-		clk_debug_create_subtree(clk, orphandir);
+		clk_debug_create_subtree(clk, rootdir);
 
 	inited = 1;
 
@@ -1284,9 +1259,6 @@
 		clk_disable(old_parent);
 		__clk_unprepare(old_parent);
 	}
-
-	/* update debugfs with new clk tree topology */
-	clk_debug_reparent(clk, parent);
 }
 
 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
@@ -1683,7 +1655,6 @@
 void __clk_reparent(struct clk *clk, struct clk *new_parent)
 {
 	clk_reparent(clk, new_parent);
-	clk_debug_reparent(clk, new_parent);
 	__clk_recalc_accuracies(clk);
 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
@@ -2414,6 +2385,7 @@
 			void *data)
 {
 	struct of_clk_provider *cp;
+	int ret;
 
 	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
 	if (!cp)
@@ -2428,7 +2400,11 @@
 	mutex_unlock(&of_clk_mutex);
 	pr_debug("Added clock from %s\n", np->full_name);
 
-	return 0;
+	ret = of_clk_set_defaults(np, true);
+	if (ret < 0)
+		of_clk_del_provider(np);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(of_clk_add_provider);
 
@@ -2605,7 +2581,10 @@
 		list_for_each_entry_safe(clk_provider, next,
 					&clk_provider_list, node) {
 			if (force || parent_ready(clk_provider->np)) {
+
 				clk_provider->clk_init_cb(clk_provider->np);
+				of_clk_set_defaults(clk_provider->np, true);
+
 				list_del(&clk_provider->node);
 				kfree(clk_provider);
 				is_init_done = true;
@@ -2620,7 +2599,6 @@
 		 */
 		if (!is_init_done)
 			force = true;
-
 	}
 }
 #endif
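
With clk_debug_reparent() removed and the whole tree kept flat under one debugfs directory, per-clock extras are added either through the newly exported clk_debugfs_add_file() or via the .debug_init hook, which runs once the clk's own dentry exists. A sketch with hypothetical to_foo_clk() and foo_regs_fops:

	static int foo_debug_init(struct clk_hw *hw, struct dentry *dentry)
	{
		/* dentry is this clock's own debugfs directory */
		if (!debugfs_create_file("foo_regs", S_IRUGO, dentry,
					 to_foo_clk(hw), &foo_regs_fops))
			return -ENOMEM;
		return 0;
	}
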
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index f890b90..da4bda8 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -101,8 +101,9 @@
 		if (!IS_ERR(clk))
 			break;
 		else if (name && index >= 0) {
-			pr_err("ERROR: could not get clock %s:%s(%i)\n",
-				np->full_name, name ? name : "", index);
+			if (PTR_ERR(clk) != -EPROBE_DEFER)
+				pr_err("ERROR: could not get clock %s:%s(%i)\n",
+					np->full_name, name ? name : "", index);
 			return clk;
 		}
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 7f696b7..1107351 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -4,6 +4,31 @@
 	select REGMAP_MMIO
 	select RESET_CONTROLLER
 
+config APQ_GCC_8084
+	tristate "APQ8084 Global Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the global clock controller on apq8084 devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI,
+	  i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config APQ_MMCC_8084
+	tristate "APQ8084 Multimedia Clock Controller"
+	select APQ_GCC_8084
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the multimedia clock controller on apq8084 devices.
+	  Say Y if you want to support multimedia devices such as display,
+	  graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_806X
+	tristate "IPQ806x Global Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the global clock controller on ipq806x devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI,
+	  i2c, USB, SD/eMMC, etc.
+
 config MSM_GCC_8660
 	tristate "MSM8660 Global Clock Controller"
 	depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 689e05b..783cfb2 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -8,6 +8,9 @@
 clk-qcom-y += clk-branch.o
 clk-qcom-y += reset.o
 
+obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
+obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 0f927c5..9db03d3 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -166,7 +166,7 @@
 EXPORT_SYMBOL_GPL(clk_pll_vote_ops);
 
 static void
-clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap)
+clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap, u8 lock_count)
 {
 	u32 val;
 	u32 mask;
@@ -175,7 +175,7 @@
 	regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0);
 
 	/* Program bias count and lock count */
-	val = 1 << PLL_BIAS_COUNT_SHIFT;
+	val = 1 << PLL_BIAS_COUNT_SHIFT | lock_count << PLL_LOCK_COUNT_SHIFT;
 	mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
 	mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
 	regmap_update_bits(regmap, pll->mode_reg, mask, val);
@@ -212,11 +212,20 @@
 	regmap_update_bits(regmap, pll->config_reg, mask, val);
 }
 
+void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
+		const struct pll_config *config, bool fsm_mode)
+{
+	clk_pll_configure(pll, regmap, config);
+	if (fsm_mode)
+		clk_pll_set_fsm_mode(pll, regmap, 8);
+}
+EXPORT_SYMBOL_GPL(clk_pll_configure_sr);
+
 void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
 		const struct pll_config *config, bool fsm_mode)
 {
 	clk_pll_configure(pll, regmap, config);
 	if (fsm_mode)
-		clk_pll_set_fsm_mode(pll, regmap);
+		clk_pll_set_fsm_mode(pll, regmap, 0);
 }
 EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index 0775a99..3003e99 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -60,6 +60,8 @@
 	u32 aux_output_mask;
 };
 
+void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
+		const struct pll_config *config, bool fsm_mode);
 void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
 		const struct pll_config *config, bool fsm_mode);
 
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index abfc2b6..b638c58 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -417,20 +417,25 @@
 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
 }
 
-static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
-			    unsigned long parent_rate)
+static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *p_rate, struct clk **p)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
-	const struct freq_tbl *f;
+	const struct freq_tbl *f = rcg->freq_tbl;
+
+	*p = clk_get_parent_by_index(hw->clk, f->src);
+	*p_rate = __clk_round_rate(*p, rate);
+
+	return *p_rate;
+}
+
+static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
+{
 	u32 ns, md, ctl;
 	struct mn *mn = &rcg->mn;
 	u32 mask = 0;
 	unsigned int reset_reg;
 
-	f = find_freq(rcg->freq_tbl, rate);
-	if (!f)
-		return -EINVAL;
-
 	if (rcg->mn.reset_in_cc)
 		reset_reg = rcg->clkr.enable_reg;
 	else
@@ -466,6 +471,27 @@
 	return 0;
 }
 
+static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long parent_rate)
+{
+	struct clk_rcg *rcg = to_clk_rcg(hw);
+	const struct freq_tbl *f;
+
+	f = find_freq(rcg->freq_tbl, rate);
+	if (!f)
+		return -EINVAL;
+
+	return __clk_rcg_set_rate(rcg, f);
+}
+
+static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct clk_rcg *rcg = to_clk_rcg(hw);
+
+	return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
+}
+
 static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
 {
 	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
@@ -503,6 +529,17 @@
 };
 EXPORT_SYMBOL_GPL(clk_rcg_ops);
 
+const struct clk_ops clk_rcg_bypass_ops = {
+	.enable = clk_enable_regmap,
+	.disable = clk_disable_regmap,
+	.get_parent = clk_rcg_get_parent,
+	.set_parent = clk_rcg_set_parent,
+	.recalc_rate = clk_rcg_recalc_rate,
+	.determine_rate = clk_rcg_bypass_determine_rate,
+	.set_rate = clk_rcg_bypass_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
+
 const struct clk_ops clk_dyn_rcg_ops = {
 	.enable = clk_enable_regmap,
 	.is_enabled = clk_is_enabled_regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b9ec11d..ba0523c 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -95,6 +95,7 @@
 };
 
 extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_bypass_ops;
 
 #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
 
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 9b5a1cf..eeb3eea 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -27,30 +27,35 @@
 	struct clk *clks[];
 };
 
-int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+struct regmap *
+qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
 	void __iomem *base;
 	struct resource *res;
+	struct device *dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return ERR_CAST(base);
+
+	return devm_regmap_init_mmio(dev, base, desc->config);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_map);
+
+int qcom_cc_really_probe(struct platform_device *pdev,
+			 const struct qcom_cc_desc *desc, struct regmap *regmap)
+{
 	int i, ret;
 	struct device *dev = &pdev->dev;
 	struct clk *clk;
 	struct clk_onecell_data *data;
 	struct clk **clks;
-	struct regmap *regmap;
 	struct qcom_reset_controller *reset;
 	struct qcom_cc *cc;
 	size_t num_clks = desc->num_clks;
 	struct clk_regmap **rclks = desc->clks;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
-
-	regmap = devm_regmap_init_mmio(dev, base, desc->config);
-	if (IS_ERR(regmap))
-		return PTR_ERR(regmap);
-
 	cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
 			  GFP_KERNEL);
 	if (!cc)
@@ -91,6 +96,18 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
+
+int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+	struct regmap *regmap;
+
+	regmap = qcom_cc_map(pdev, desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return qcom_cc_really_probe(pdev, desc, regmap);
+}
 EXPORT_SYMBOL_GPL(qcom_cc_probe);
 
 void qcom_cc_remove(struct platform_device *pdev)
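
The qcom_cc_map()/qcom_cc_really_probe() split exists so a controller driver can program the hardware through the regmap before any clocks are registered, for instance putting a PLL into FSM voting mode with the clk_pll_configure_sr() helper added above. A sketch with hypothetical gcc_foo_desc, pll_foo and pll_foo_config:

	regmap = qcom_cc_map(pdev, &gcc_foo_desc);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* pre-configure the PLL before the clocks are registered */
	clk_pll_configure_sr(&pll_foo, regmap, &pll_foo_config, true);

	return qcom_cc_really_probe(pdev, &gcc_foo_desc, regmap);
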
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 2c3cfc8..2765e9d 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -17,6 +17,7 @@
 struct regmap_config;
 struct clk_regmap;
 struct qcom_reset_map;
+struct regmap;
 
 struct qcom_cc_desc {
 	const struct regmap_config *config;
@@ -26,6 +27,11 @@
 	size_t num_resets;
 };
 
+extern struct regmap *qcom_cc_map(struct platform_device *pdev,
+				  const struct qcom_cc_desc *desc);
+extern int qcom_cc_really_probe(struct platform_device *pdev,
+				const struct qcom_cc_desc *desc,
+				struct regmap *regmap);
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
 
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
new file mode 100644
index 0000000..ee52eb1
--- /dev/null
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -0,0 +1,3611 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-apq8084.h>
+#include <dt-bindings/reset/qcom,gcc-apq8084.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO	0
+#define P_GPLL0	1
+#define P_GPLL1	1
+#define P_GPLL4	2
+#define P_PCIE_0_1_PIPE_CLK 1
+#define P_SATA_ASIC0_CLK 1
+#define P_SATA_RX_CLK 1
+#define P_SLEEP_CLK 1
+
+static const u8 gcc_xo_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_GPLL0]	= 1,
+};
+
+static const char *gcc_xo_gpll0[] = {
+	"xo",
+	"gpll0_vote",
+};
+
+static const u8 gcc_xo_gpll0_gpll4_map[] = {
+	[P_XO]		= 0,
+	[P_GPLL0]	= 1,
+	[P_GPLL4]	= 5,
+};
+
+static const char *gcc_xo_gpll0_gpll4[] = {
+	"xo",
+	"gpll0_vote",
+	"gpll4_vote",
+};
+
+static const u8 gcc_xo_sata_asic0_map[] = {
+	[P_XO]			= 0,
+	[P_SATA_ASIC0_CLK]	= 2,
+};
+
+static const char *gcc_xo_sata_asic0[] = {
+	"xo",
+	"sata_asic0_clk",
+};
+
+static const u8 gcc_xo_sata_rx_map[] = {
+	[P_XO]			= 0,
+	[P_SATA_RX_CLK]		= 2,
+};
+
+static const char *gcc_xo_sata_rx[] = {
+	"xo",
+	"sata_rx_clk",
+};
+
+static const u8 gcc_xo_pcie_map[] = {
+	[P_XO]			= 0,
+	[P_PCIE_0_1_PIPE_CLK]	= 2,
+};
+
+static const char *gcc_xo_pcie[] = {
+	"xo",
+	"pcie_pipe",
+};
+
+static const u8 gcc_xo_pcie_sleep_map[] = {
+	[P_XO]			= 0,
+	[P_SLEEP_CLK]		= 6,
+};
+
+static const char *gcc_xo_pcie_sleep[] = {
+	"xo",
+	"sleep_clk_src",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static struct clk_pll gpll0 = {
+	.l_reg = 0x0004,
+	.m_reg = 0x0008,
+	.n_reg = 0x000c,
+	.config_reg = 0x0014,
+	.mode_reg = 0x0000,
+	.status_reg = 0x001c,
+	.status_bit = 17,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_regmap gpll0_vote = {
+	.enable_reg = 0x1480,
+	.enable_mask = BIT(0),
+	.hw.init = &(struct clk_init_data){
+		.name = "gpll0_vote",
+		.parent_names = (const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+	.cmd_rcgr = 0x0150,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "config_noc_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+	.cmd_rcgr = 0x0190,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "periph_noc_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+	.cmd_rcgr = 0x0120,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "system_noc_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_pll gpll1 = {
+	.l_reg = 0x0044,
+	.m_reg = 0x0048,
+	.n_reg = 0x004c,
+	.config_reg = 0x0054,
+	.mode_reg = 0x0040,
+	.status_reg = 0x005c,
+	.status_bit = 17,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll1",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_regmap gpll1_vote = {
+	.enable_reg = 0x1480,
+	.enable_mask = BIT(1),
+	.hw.init = &(struct clk_init_data){
+		.name = "gpll1_vote",
+		.parent_names = (const char *[]){ "gpll1" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+static struct clk_pll gpll4 = {
+	.l_reg = 0x1dc4,
+	.m_reg = 0x1dc8,
+	.n_reg = 0x1dcc,
+	.config_reg = 0x1dd4,
+	.mode_reg = 0x1dc0,
+	.status_reg = 0x1ddc,
+	.status_bit = 17,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll4",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_regmap gpll4_vote = {
+	.enable_reg = 0x1480,
+	.enable_mask = BIT(4),
+	.hw.init = &(struct clk_init_data){
+		.name = "gpll4_vote",
+		.parent_names = (const char *[]){ "gpll4" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_axi_clk[] = {
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(240000000, P_GPLL0, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+	.cmd_rcgr = 0x1d64,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_ufs_axi_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ufs_axi_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk[] = {
+	F(125000000, P_GPLL0, 1, 5, 24),
+	{ }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+	.cmd_rcgr = 0x03d4,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb30_master_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb30_master_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_master_clk[] = {
+	F(125000000, P_GPLL0, 1, 5, 24),
+	{ }
+};
+
+static struct clk_rcg2 usb30_sec_master_clk_src = {
+	.cmd_rcgr = 0x1bd4,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb30_sec_master_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb30_sec_master_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+	.halt_reg = 0x1bd0,
+	.clkr = {
+		.enable_reg = 0x1bd0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_sec_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"usb30_sec_mock_utmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+	.halt_reg = 0x1bcc,
+	.clkr = {
+		.enable_reg = 0x1bcc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_sec_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0660,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup1_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk[] = {
+	F(960000, P_XO, 10, 1, 2),
+	F(4800000, P_XO, 4, 0, 0),
+	F(9600000, P_XO, 2, 0, 0),
+	F(15000000, P_GPLL0, 10, 1, 4),
+	F(19200000, P_XO, 1, 0, 0),
+	F(25000000, P_GPLL0, 12, 1, 2),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+	.cmd_rcgr = 0x064c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup1_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x06e0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup2_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+	.cmd_rcgr = 0x06cc,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup2_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0760,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup3_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+	.cmd_rcgr = 0x074c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup3_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x07e0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup4_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+	.cmd_rcgr = 0x07cc,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup4_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0860,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup5_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+	.cmd_rcgr = 0x084c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup5_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x08e0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup6_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+	.cmd_rcgr = 0x08cc,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup6_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
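+/*
+ * The UART baud clocks lean on the M/N counter: rate = parent / pre_div
+ * * M / N.  Assuming GPLL0 runs at 600 MHz (consistent with the /12 =
+ * 50 MHz and /3 = 200 MHz rows elsewhere in this file), the first row
+ * works out to 600 MHz * 96 / 15625 = 3.6864 MHz, i.e. 16x 230400 baud.
+ */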
+static const struct freq_tbl ftbl_gcc_blsp1_2_uart1_6_apps_clk[] = {
+	F(3686400, P_GPLL0, 1, 96, 15625),
+	F(7372800, P_GPLL0, 1, 192, 15625),
+	F(14745600, P_GPLL0, 1, 384, 15625),
+	F(16000000, P_GPLL0, 5, 2, 15),
+	F(19200000, P_XO, 1, 0, 0),
+	F(24000000, P_GPLL0, 5, 1, 5),
+	F(32000000, P_GPLL0, 1, 4, 75),
+	F(40000000, P_GPLL0, 15, 0, 0),
+	F(46400000, P_GPLL0, 1, 29, 375),
+	F(48000000, P_GPLL0, 12.5, 0, 0),
+	F(51200000, P_GPLL0, 1, 32, 375),
+	F(56000000, P_GPLL0, 1, 7, 75),
+	F(58982400, P_GPLL0, 1, 1536, 15625),
+	F(60000000, P_GPLL0, 10, 0, 0),
+	F(63160000, P_GPLL0, 9.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+	.cmd_rcgr = 0x068c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart1_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+	.cmd_rcgr = 0x070c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart2_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+	.cmd_rcgr = 0x078c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart3_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+	.cmd_rcgr = 0x080c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart4_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+	.cmd_rcgr = 0x088c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart5_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+	.cmd_rcgr = 0x090c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart6_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x09a0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup1_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+	.cmd_rcgr = 0x098c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup1_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0a20,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup2_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+	.cmd_rcgr = 0x0a0c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup2_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0aa0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup3_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+	.cmd_rcgr = 0x0a8c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup3_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0b20,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup4_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+	.cmd_rcgr = 0x0b0c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup4_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0ba0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup5_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+	.cmd_rcgr = 0x0b8c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup5_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0c20,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup6_i2c_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+	.cmd_rcgr = 0x0c0c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_qup6_spi_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+	.cmd_rcgr = 0x09cc,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_uart1_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+	.cmd_rcgr = 0x0a4c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_uart2_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+	.cmd_rcgr = 0x0acc,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_uart3_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+	.cmd_rcgr = 0x0b4c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_uart4_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+	.cmd_rcgr = 0x0bcc,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_uart5_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+	.cmd_rcgr = 0x0c4c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp2_uart6_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ce1_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(85710000, P_GPLL0, 7, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(171430000, P_GPLL0, 3.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ce1_clk_src = {
+	.cmd_rcgr = 0x1050,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_ce1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ce1_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ce2_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(85710000, P_GPLL0, 7, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(171430000, P_GPLL0, 3.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ce2_clk_src = {
+	.cmd_rcgr = 0x1090,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_ce2_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ce2_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ce3_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(85710000, P_GPLL0, 7, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(171430000, P_GPLL0, 3.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ce3_clk_src = {
+	.cmd_rcgr = 0x1d10,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_ce3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ce3_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_gp_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+	.cmd_rcgr = 0x1904,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_gp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gp1_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+	.cmd_rcgr = 0x1944,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_gp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gp2_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+	.cmd_rcgr = 0x1984,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_gp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gp3_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_1_aux_clk[] = {
+	F(1010000, P_XO, 1, 1, 19),
+	{ }
+};
+
+static struct clk_rcg2 pcie_0_aux_clk_src = {
+	.cmd_rcgr = 0x1b2c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_pcie_sleep_map,
+	.freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie_0_aux_clk_src",
+		.parent_names = gcc_xo_pcie_sleep,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 pcie_1_aux_clk_src = {
+	.cmd_rcgr = 0x1bac,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_pcie_sleep_map,
+	.freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie_1_aux_clk_src",
+		.parent_names = gcc_xo_pcie_sleep,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
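+/*
+ * The PCIe pipe, SATA ASIC0 and SATA RX clocks appear to be sourced
+ * from clocks recovered by the external PHYs, so the tables below only
+ * switch the mux (pre-divider of 1); the actual rate is dictated by
+ * the PHY.
+ */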
+static const struct freq_tbl ftbl_gcc_pcie_0_1_pipe_clk[] = {
+	F(125000000, P_PCIE_0_1_PIPE_CLK, 1, 0, 0),
+	F(250000000, P_PCIE_0_1_PIPE_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 pcie_0_pipe_clk_src = {
+	.cmd_rcgr = 0x1b18,
+	.hid_width = 5,
+	.parent_map = gcc_xo_pcie_map,
+	.freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie_0_pipe_clk_src",
+		.parent_names = gcc_xo_pcie,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 pcie_1_pipe_clk_src = {
+	.cmd_rcgr = 0x1b98,
+	.hid_width = 5,
+	.parent_map = gcc_xo_pcie_map,
+	.freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie_1_pipe_clk_src",
+		.parent_names = gcc_xo_pcie,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+	F(60000000, P_GPLL0, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+	.cmd_rcgr = 0x0cd0,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_pdm2_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pdm2_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sata_asic0_clk[] = {
+	F(75000000, P_SATA_ASIC0_CLK, 1, 0, 0),
+	F(150000000, P_SATA_ASIC0_CLK, 1, 0, 0),
+	F(300000000, P_SATA_ASIC0_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 sata_asic0_clk_src = {
+	.cmd_rcgr = 0x1c94,
+	.hid_width = 5,
+	.parent_map = gcc_xo_sata_asic0_map,
+	.freq_tbl = ftbl_gcc_sata_asic0_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sata_asic0_clk_src",
+		.parent_names = gcc_xo_sata_asic0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sata_pmalive_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 sata_pmalive_clk_src = {
+	.cmd_rcgr = 0x1c80,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_sata_pmalive_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sata_pmalive_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sata_rx_clk[] = {
+	F(75000000, P_SATA_RX_CLK, 1, 0, 0),
+	F(150000000, P_SATA_RX_CLK, 1, 0, 0),
+	F(300000000, P_SATA_RX_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 sata_rx_clk_src = {
+	.cmd_rcgr = 0x1ca8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_sata_rx_map,
+	.freq_tbl = ftbl_gcc_sata_rx_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sata_rx_clk_src",
+		.parent_names = gcc_xo_sata_rx,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sata_rx_oob_clk[] = {
+	F(100000000, P_GPLL0, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 sata_rx_oob_clk_src = {
+	.cmd_rcgr = 0x1c5c,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_sata_rx_oob_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sata_rx_oob_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
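+/*
+ * The 192 MHz and 384 MHz SDC rates come from GPLL4, which the /4 and
+ * /2 rows suggest runs at 768 MHz.  Only sdcc1 uses the three-parent
+ * xo/gpll0/gpll4 map, so the GPLL4 rows of this shared table are
+ * reachable from sdcc1 alone.
+ */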
+static const struct freq_tbl ftbl_gcc_sdcc1_4_apps_clk[] = {
+	F(144000, P_XO, 16, 3, 25),
+	F(400000, P_XO, 12, 1, 4),
+	F(20000000, P_GPLL0, 15, 1, 2),
+	F(25000000, P_GPLL0, 12, 1, 2),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(192000000, P_GPLL4, 4, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(384000000, P_GPLL4, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+	.cmd_rcgr = 0x04d0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll4_map,
+	.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sdcc1_apps_clk_src",
+		.parent_names = gcc_xo_gpll0_gpll4,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+	.cmd_rcgr = 0x0510,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sdcc2_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+	.cmd_rcgr = 0x0550,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sdcc3_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+	.cmd_rcgr = 0x0590,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sdcc4_apps_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk[] = {
+	F(105000, P_XO, 2, 1, 91),
+	{ }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+	.cmd_rcgr = 0x0d90,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_tsif_ref_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "tsif_ref_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
+	F(60000000, P_GPLL0, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x03e8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb30_mock_utmi_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_mock_utmi_clk[] = {
+	F(125000000, P_GPLL0, 1, 5, 24),
+	{ }
+};
+
+static struct clk_rcg2 usb30_sec_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x1be8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb30_sec_mock_utmi_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb30_sec_mock_utmi_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+	F(75000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+	.cmd_rcgr = 0x0490,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb_hs_system_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb_hs_system_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_clk[] = {
+	F(480000000, P_GPLL1, 1, 0, 0),
+	{ }
+};
+
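+/*
+ * HSIC runs straight off GPLL1, which the divide-by-1 480 MHz row above
+ * suggests is a 480 MHz PLL; hardware mux position 4 selects it.
+ */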
+static const u8 usb_hsic_clk_src_map[] = {
+	[P_XO]		= 0,
+	[P_GPLL1]	= 4,
+};
+
+static struct clk_rcg2 usb_hsic_clk_src = {
+	.cmd_rcgr = 0x0440,
+	.hid_width = 5,
+	.parent_map = usb_hsic_clk_src_map,
+	.freq_tbl = ftbl_gcc_usb_hsic_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb_hsic_clk_src",
+		.parent_names = (const char *[]){
+			"xo",
+			"gpll1_vote",
+		},
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_ahb_clk_src[] = {
+	F(60000000, P_GPLL1, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb_hsic_ahb_clk_src = {
+	.cmd_rcgr = 0x046c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = usb_hsic_clk_src_map,
+	.freq_tbl = ftbl_gcc_usb_hsic_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb_hsic_ahb_clk_src",
+		.parent_names = (const char *[]){
+			"xo",
+			"gpll1_vote",
+		},
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_io_cal_clk[] = {
+	F(9600000, P_XO, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb_hsic_io_cal_clk_src = {
+	.cmd_rcgr = 0x0458,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb_hsic_io_cal_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb_hsic_io_cal_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_usb_hsic_mock_utmi_clk = {
+	.halt_reg = 0x1f14,
+	.clkr = {
+		.enable_reg = 0x1f14,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hsic_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"usb_hsic_mock_utmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_mock_utmi_clk[] = {
+	F(60000000, P_GPLL0, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb_hsic_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x1f00,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb_hsic_mock_utmi_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb_hsic_mock_utmi_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_system_clk[] = {
+	F(75000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb_hsic_system_clk_src = {
+	.cmd_rcgr = 0x041c,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.freq_tbl = ftbl_gcc_usb_hsic_system_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb_hsic_system_clk_src",
+		.parent_names = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
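+/*
+ * Branch (gate) clocks.  halt_reg is polled to confirm the branch is
+ * actually running; BRANCH_HALT_VOTED entries are enabled through the
+ * shared voting register at 0x1484 instead of a local enable register,
+ * and CLK_SET_RATE_PARENT lets rate requests propagate up to the RCG.
+ */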
+static struct clk_branch gcc_bam_dma_ahb_clk = {
+	.halt_reg = 0x0d44,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_bam_dma_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+	.halt_reg = 0x05c4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(17),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+	.halt_reg = 0x0648,
+	.clkr = {
+		.enable_reg = 0x0648,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup1_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup1_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+	.halt_reg = 0x0644,
+	.clkr = {
+		.enable_reg = 0x0644,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup1_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup1_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+	.halt_reg = 0x06c8,
+	.clkr = {
+		.enable_reg = 0x06c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup2_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup2_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+	.halt_reg = 0x06c4,
+	.clkr = {
+		.enable_reg = 0x06c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup2_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup2_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+	.halt_reg = 0x0748,
+	.clkr = {
+		.enable_reg = 0x0748,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup3_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup3_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+	.halt_reg = 0x0744,
+	.clkr = {
+		.enable_reg = 0x0744,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup3_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup3_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+	.halt_reg = 0x07c8,
+	.clkr = {
+		.enable_reg = 0x07c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup4_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup4_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+	.halt_reg = 0x07c4,
+	.clkr = {
+		.enable_reg = 0x07c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup4_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup4_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+	.halt_reg = 0x0848,
+	.clkr = {
+		.enable_reg = 0x0848,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup5_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup5_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+	.halt_reg = 0x0844,
+	.clkr = {
+		.enable_reg = 0x0844,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup5_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup5_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+	.halt_reg = 0x08c8,
+	.clkr = {
+		.enable_reg = 0x08c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup6_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup6_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+	.halt_reg = 0x08c4,
+	.clkr = {
+		.enable_reg = 0x08c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup6_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_qup6_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+	.halt_reg = 0x0684,
+	.clkr = {
+		.enable_reg = 0x0684,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart1_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_uart1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+	.halt_reg = 0x0704,
+	.clkr = {
+		.enable_reg = 0x0704,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart2_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_uart2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+	.halt_reg = 0x0784,
+	.clkr = {
+		.enable_reg = 0x0784,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart3_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_uart3_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+	.halt_reg = 0x0804,
+	.clkr = {
+		.enable_reg = 0x0804,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart4_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_uart4_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+	.halt_reg = 0x0884,
+	.clkr = {
+		.enable_reg = 0x0884,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart5_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_uart5_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+	.halt_reg = 0x0904,
+	.clkr = {
+		.enable_reg = 0x0904,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart6_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp1_uart6_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+	.halt_reg = 0x0944,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+	.halt_reg = 0x0988,
+	.clkr = {
+		.enable_reg = 0x0988,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup1_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup1_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+	.halt_reg = 0x0984,
+	.clkr = {
+		.enable_reg = 0x0984,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup1_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup1_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+	.halt_reg = 0x0a08,
+	.clkr = {
+		.enable_reg = 0x0a08,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup2_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup2_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+	.halt_reg = 0x0a04,
+	.clkr = {
+		.enable_reg = 0x0a04,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup2_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup2_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+	.halt_reg = 0x0a88,
+	.clkr = {
+		.enable_reg = 0x0a88,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup3_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup3_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+	.halt_reg = 0x0a84,
+	.clkr = {
+		.enable_reg = 0x0a84,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup3_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup3_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+	.halt_reg = 0x0b08,
+	.clkr = {
+		.enable_reg = 0x0b08,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup4_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup4_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+	.halt_reg = 0x0b04,
+	.clkr = {
+		.enable_reg = 0x0b04,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup4_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup4_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+	.halt_reg = 0x0b88,
+	.clkr = {
+		.enable_reg = 0x0b88,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup5_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup5_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+	.halt_reg = 0x0b84,
+	.clkr = {
+		.enable_reg = 0x0b84,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup5_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup5_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+	.halt_reg = 0x0c08,
+	.clkr = {
+		.enable_reg = 0x0c08,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup6_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup6_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+	.halt_reg = 0x0c04,
+	.clkr = {
+		.enable_reg = 0x0c04,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_qup6_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_qup6_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+	.halt_reg = 0x09c4,
+	.clkr = {
+		.enable_reg = 0x09c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_uart1_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_uart1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+	.halt_reg = 0x0a44,
+	.clkr = {
+		.enable_reg = 0x0a44,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_uart2_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_uart2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+	.halt_reg = 0x0ac4,
+	.clkr = {
+		.enable_reg = 0x0ac4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_uart3_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_uart3_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+	.halt_reg = 0x0b44,
+	.clkr = {
+		.enable_reg = 0x0b44,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_uart4_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_uart4_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+	.halt_reg = 0x0bc4,
+	.clkr = {
+		.enable_reg = 0x0bc4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_uart5_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_uart5_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+	.halt_reg = 0x0c44,
+	.clkr = {
+		.enable_reg = 0x0c44,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp2_uart6_apps_clk",
+			.parent_names = (const char *[]){
+				"blsp2_uart6_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+	.halt_reg = 0x0e04,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_boot_rom_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+	.halt_reg = 0x104c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(3),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+	.halt_reg = 0x1048,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_axi_clk",
+			.parent_names = (const char *[]){
+				"system_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_clk = {
+	.halt_reg = 0x1050,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(5),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_clk",
+			.parent_names = (const char *[]){
+				"ce1_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce2_ahb_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce2_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce2_axi_clk = {
+	.halt_reg = 0x1088,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce2_axi_clk",
+			.parent_names = (const char *[]){
+				"system_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce2_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce2_clk",
+			.parent_names = (const char *[]){
+				"ce2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce3_ahb_clk = {
+	.halt_reg = 0x1d0c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1d0c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce3_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce3_axi_clk = {
+	.halt_reg = 0x1088,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1d08,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce3_axi_clk",
+			.parent_names = (const char *[]){
+				"system_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce3_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1d04,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce3_clk",
+			.parent_names = (const char *[]){
+				"ce3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp1_clk = {
+	.halt_reg = 0x1900,
+	.clkr = {
+		.enable_reg = 0x1900,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp1_clk",
+			.parent_names = (const char *[]){
+				"gp1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp2_clk = {
+	.halt_reg = 0x1940,
+	.clkr = {
+		.enable_reg = 0x1940,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp2_clk",
+			.parent_names = (const char *[]){
+				"gp2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp3_clk = {
+	.halt_reg = 0x1980,
+	.clkr = {
+		.enable_reg = 0x1980,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp3_clk",
+			.parent_names = (const char *[]){
+				"gp3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ocmem_noc_cfg_ahb_clk = {
+	.halt_reg = 0x0248,
+	.clkr = {
+		.enable_reg = 0x0248,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ocmem_noc_cfg_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+	.halt_reg = 0x1b10,
+	.clkr = {
+		.enable_reg = 0x1b10,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_0_aux_clk",
+			.parent_names = (const char *[]){
+				"pcie_0_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+	.halt_reg = 0x1b0c,
+	.clkr = {
+		.enable_reg = 0x1b0c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_0_cfg_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+	.halt_reg = 0x1b08,
+	.clkr = {
+		.enable_reg = 0x1b08,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_0_mstr_axi_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+	.halt_reg = 0x1b14,
+	.clkr = {
+		.enable_reg = 0x1b14,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_0_pipe_clk",
+			.parent_names = (const char *[]){
+				"pcie_0_pipe_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+	.halt_reg = 0x1b04,
+	.clkr = {
+		.enable_reg = 0x1b04,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_0_slv_axi_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+	.halt_reg = 0x1b90,
+	.clkr = {
+		.enable_reg = 0x1b90,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_1_aux_clk",
+			.parent_names = (const char *[]){
+				"pcie_1_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+	.halt_reg = 0x1b8c,
+	.clkr = {
+		.enable_reg = 0x1b8c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_1_cfg_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+	.halt_reg = 0x1b88,
+	.clkr = {
+		.enable_reg = 0x1b88,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_1_mstr_axi_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+	.halt_reg = 0x1b94,
+	.clkr = {
+		.enable_reg = 0x1b94,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_1_pipe_clk",
+			.parent_names = (const char *[]){
+				"pcie_1_pipe_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+	.halt_reg = 0x1b84,
+	.clkr = {
+		.enable_reg = 0x1b84,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_1_slv_axi_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+	.halt_reg = 0x0ccc,
+	.clkr = {
+		.enable_reg = 0x0ccc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm2_clk",
+			.parent_names = (const char *[]){
+				"pdm2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+	.halt_reg = 0x0cc4,
+	.clkr = {
+		.enable_reg = 0x0cc4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_periph_noc_usb_hsic_ahb_clk = {
+	.halt_reg = 0x01a4,
+	.clkr = {
+		.enable_reg = 0x01a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_periph_noc_usb_hsic_ahb_clk",
+			.parent_names = (const char *[]){
+				"usb_hsic_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+	.halt_reg = 0x0d04,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1484,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_prng_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sata_asic0_clk = {
+	.halt_reg = 0x1c54,
+	.clkr = {
+		.enable_reg = 0x1c54,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sata_asic0_clk",
+			.parent_names = (const char *[]){
+				"sata_asic0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sata_axi_clk = {
+	.halt_reg = 0x1c44,
+	.clkr = {
+		.enable_reg = 0x1c44,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sata_axi_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sata_cfg_ahb_clk = {
+	.halt_reg = 0x1c48,
+	.clkr = {
+		.enable_reg = 0x1c48,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sata_cfg_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sata_pmalive_clk = {
+	.halt_reg = 0x1c50,
+	.clkr = {
+		.enable_reg = 0x1c50,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sata_pmalive_clk",
+			.parent_names = (const char *[]){
+				"sata_pmalive_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sata_rx_clk = {
+	.halt_reg = 0x1c58,
+	.clkr = {
+		.enable_reg = 0x1c58,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sata_rx_clk",
+			.parent_names = (const char *[]){
+				"sata_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sata_rx_oob_clk = {
+	.halt_reg = 0x1c4c,
+	.clkr = {
+		.enable_reg = 0x1c4c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sata_rx_oob_clk",
+			.parent_names = (const char *[]){
+				"sata_rx_oob_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+	.halt_reg = 0x04c8,
+	.clkr = {
+		.enable_reg = 0x04c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+	.halt_reg = 0x04c4,
+	.clkr = {
+		.enable_reg = 0x04c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_apps_clk",
+			.parent_names = (const char *[]){
+				"sdcc1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_cdccal_ff_clk = {
+	.halt_reg = 0x04e8,
+	.clkr = {
+		.enable_reg = 0x04e8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_cdccal_ff_clk",
+			.parent_names = (const char *[]){
+				"xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_cdccal_sleep_clk = {
+	.halt_reg = 0x04e4,
+	.clkr = {
+		.enable_reg = 0x04e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_cdccal_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+	.halt_reg = 0x0508,
+	.clkr = {
+		.enable_reg = 0x0508,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+	.halt_reg = 0x0504,
+	.clkr = {
+		.enable_reg = 0x0504,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_apps_clk",
+			.parent_names = (const char *[]){
+				"sdcc2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+	.halt_reg = 0x0548,
+	.clkr = {
+		.enable_reg = 0x0548,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc3_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+	.halt_reg = 0x0544,
+	.clkr = {
+		.enable_reg = 0x0544,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc3_apps_clk",
+			.parent_names = (const char *[]){
+				"sdcc3_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+	.halt_reg = 0x0588,
+	.clkr = {
+		.enable_reg = 0x0588,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc4_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+	.halt_reg = 0x0584,
+	.clkr = {
+		.enable_reg = 0x0584,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc4_apps_clk",
+			.parent_names = (const char *[]){
+				"sdcc4_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
+	.halt_reg = 0x013c,
+	.clkr = {
+		.enable_reg = 0x013c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_ufs_axi_clk",
+			.parent_names = (const char *[]){
+				"ufs_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+	.halt_reg = 0x0108,
+	.clkr = {
+		.enable_reg = 0x0108,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_usb3_axi_clk",
+			.parent_names = (const char *[]){
+				"usb30_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_usb3_sec_axi_clk = {
+	.halt_reg = 0x0138,
+	.clkr = {
+		.enable_reg = 0x0138,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_usb3_sec_axi_clk",
+			.parent_names = (const char *[]){
+				"usb30_sec_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+	.halt_reg = 0x0d84,
+	.clkr = {
+		.enable_reg = 0x0d84,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_tsif_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+	.halt_reg = 0x0d8c,
+	.clkr = {
+		.enable_reg = 0x0d8c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_tsif_inactivity_timers_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+	.halt_reg = 0x0d88,
+	.clkr = {
+		.enable_reg = 0x0d88,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_tsif_ref_clk",
+			.parent_names = (const char *[]){
+				"tsif_ref_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_ahb_clk = {
+	.halt_reg = 0x1d48,
+	.clkr = {
+		.enable_reg = 0x1d48,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_ahb_clk",
+			.parent_names = (const char *[]){
+				"config_noc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+	.halt_reg = 0x1d44,
+	.clkr = {
+		.enable_reg = 0x1d44,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_axi_clk",
+			.parent_names = (const char *[]){
+				"ufs_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_rx_cfg_clk = {
+	.halt_reg = 0x1d50,
+	.clkr = {
+		.enable_reg = 0x1d50,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_rx_cfg_clk",
+			.parent_names = (const char *[]){
+				"ufs_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+	.halt_reg = 0x1d5c,
+	.clkr = {
+		.enable_reg = 0x1d5c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_rx_symbol_0_clk",
+			.parent_names = (const char *[]){
+				"ufs_rx_symbol_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+	.halt_reg = 0x1d60,
+	.clkr = {
+		.enable_reg = 0x1d60,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_rx_symbol_1_clk",
+			.parent_names = (const char *[]){
+				"ufs_rx_symbol_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_tx_cfg_clk = {
+	.halt_reg = 0x1d4c,
+	.clkr = {
+		.enable_reg = 0x1d4c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_tx_cfg_clk",
+			.parent_names = (const char *[]){
+				"ufs_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+	.halt_reg = 0x1d54,
+	.clkr = {
+		.enable_reg = 0x1d54,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_tx_symbol_0_clk",
+			.parent_names = (const char *[]){
+				"ufs_tx_symbol_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
+	.halt_reg = 0x1d58,
+	.clkr = {
+		.enable_reg = 0x1d58,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_tx_symbol_1_clk",
+			.parent_names = (const char *[]){
+				"ufs_tx_symbol_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+	.halt_reg = 0x04ac,
+	.clkr = {
+		.enable_reg = 0x04ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb2a_phy_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb2b_phy_sleep_clk = {
+	.halt_reg = 0x04b4,
+	.clkr = {
+		.enable_reg = 0x04b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb2b_phy_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+	.halt_reg = 0x03c8,
+	.clkr = {
+		.enable_reg = 0x03c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_master_clk",
+			.parent_names = (const char *[]){
+				"usb30_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+	.halt_reg = 0x1bc8,
+	.clkr = {
+		.enable_reg = 0x1bc8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_sec_master_clk",
+			.parent_names = (const char *[]){
+				"usb30_sec_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+	.halt_reg = 0x03d0,
+	.clkr = {
+		.enable_reg = 0x03d0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"usb30_mock_utmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+	.halt_reg = 0x03cc,
+	.clkr = {
+		.enable_reg = 0x03cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+	.halt_reg = 0x0488,
+	.clkr = {
+		.enable_reg = 0x0488,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hs_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hs_inactivity_timers_clk = {
+	.halt_reg = 0x048c,
+	.clkr = {
+		.enable_reg = 0x048c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hs_inactivity_timers_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+	.halt_reg = 0x0484,
+	.clkr = {
+		.enable_reg = 0x0484,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hs_system_clk",
+			.parent_names = (const char *[]){
+				"usb_hs_system_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hsic_ahb_clk = {
+	.halt_reg = 0x0408,
+	.clkr = {
+		.enable_reg = 0x0408,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hsic_ahb_clk",
+			.parent_names = (const char *[]){
+				"periph_noc_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hsic_clk = {
+	.halt_reg = 0x0410,
+	.clkr = {
+		.enable_reg = 0x0410,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hsic_clk",
+			.parent_names = (const char *[]){
+				"usb_hsic_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_clk = {
+	.halt_reg = 0x0414,
+	.clkr = {
+		.enable_reg = 0x0414,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hsic_io_cal_clk",
+			.parent_names = (const char *[]){
+				"usb_hsic_io_cal_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_sleep_clk = {
+	.halt_reg = 0x0418,
+	.clkr = {
+		.enable_reg = 0x0418,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hsic_io_cal_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_hsic_system_clk = {
+	.halt_reg = 0x040c,
+	.clkr = {
+		.enable_reg = 0x040c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_hsic_system_clk",
+			.parent_names = (const char *[]){
+				"usb_hsic_system_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *gcc_apq8084_clocks[] = {
+	[GPLL0] = &gpll0.clkr,
+	[GPLL0_VOTE] = &gpll0_vote,
+	[GPLL1] = &gpll1.clkr,
+	[GPLL1_VOTE] = &gpll1_vote,
+	[GPLL4] = &gpll4.clkr,
+	[GPLL4_VOTE] = &gpll4_vote,
+	[CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+	[PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+	[SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+	[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+	[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+	[USB30_SEC_MASTER_CLK_SRC] = &usb30_sec_master_clk_src.clkr,
+	[USB_HSIC_AHB_CLK_SRC] = &usb_hsic_ahb_clk_src.clkr,
+	[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+	[BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+	[BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+	[BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+	[BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+	[BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+	[BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+	[BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+	[BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+	[BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+	[BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+	[BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+	[BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+	[BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+	[BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+	[BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+	[BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+	[BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+	[BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+	[BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+	[BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+	[BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+	[BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+	[BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+	[BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+	[BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+	[BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+	[BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+	[BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+	[BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+	[CE1_CLK_SRC] = &ce1_clk_src.clkr,
+	[CE2_CLK_SRC] = &ce2_clk_src.clkr,
+	[CE3_CLK_SRC] = &ce3_clk_src.clkr,
+	[GP1_CLK_SRC] = &gp1_clk_src.clkr,
+	[GP2_CLK_SRC] = &gp2_clk_src.clkr,
+	[GP3_CLK_SRC] = &gp3_clk_src.clkr,
+	[PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr,
+	[PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr,
+	[PCIE_1_AUX_CLK_SRC] = &pcie_1_aux_clk_src.clkr,
+	[PCIE_1_PIPE_CLK_SRC] = &pcie_1_pipe_clk_src.clkr,
+	[PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+	[SATA_ASIC0_CLK_SRC] = &sata_asic0_clk_src.clkr,
+	[SATA_PMALIVE_CLK_SRC] = &sata_pmalive_clk_src.clkr,
+	[SATA_RX_CLK_SRC] = &sata_rx_clk_src.clkr,
+	[SATA_RX_OOB_CLK_SRC] = &sata_rx_oob_clk_src.clkr,
+	[SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+	[SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+	[SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+	[SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+	[TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+	[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+	[USB30_SEC_MOCK_UTMI_CLK_SRC] = &usb30_sec_mock_utmi_clk_src.clkr,
+	[USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+	[USB_HSIC_CLK_SRC] = &usb_hsic_clk_src.clkr,
+	[USB_HSIC_IO_CAL_CLK_SRC] = &usb_hsic_io_cal_clk_src.clkr,
+	[USB_HSIC_MOCK_UTMI_CLK_SRC] = &usb_hsic_mock_utmi_clk_src.clkr,
+	[USB_HSIC_SYSTEM_CLK_SRC] = &usb_hsic_system_clk_src.clkr,
+	[GCC_BAM_DMA_AHB_CLK] = &gcc_bam_dma_ahb_clk.clkr,
+	[GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+	[GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+	[GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+	[GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+	[GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+	[GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+	[GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+	[GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+	[GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+	[GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+	[GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+	[GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+	[GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+	[GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+	[GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+	[GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+	[GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+	[GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+	[GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+	[GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+	[GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+	[GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+	[GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+	[GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+	[GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+	[GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+	[GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+	[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+	[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+	[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+	[GCC_CE2_AHB_CLK] = &gcc_ce2_ahb_clk.clkr,
+	[GCC_CE2_AXI_CLK] = &gcc_ce2_axi_clk.clkr,
+	[GCC_CE2_CLK] = &gcc_ce2_clk.clkr,
+	[GCC_CE3_AHB_CLK] = &gcc_ce3_ahb_clk.clkr,
+	[GCC_CE3_AXI_CLK] = &gcc_ce3_axi_clk.clkr,
+	[GCC_CE3_CLK] = &gcc_ce3_clk.clkr,
+	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_OCMEM_NOC_CFG_AHB_CLK] = &gcc_ocmem_noc_cfg_ahb_clk.clkr,
+	[GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+	[GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+	[GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+	[GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+	[GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+	[GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+	[GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+	[GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+	[GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+	[GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+	[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+	[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+	[GCC_PERIPH_NOC_USB_HSIC_AHB_CLK] = &gcc_periph_noc_usb_hsic_ahb_clk.clkr,
+	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+	[GCC_SATA_ASIC0_CLK] = &gcc_sata_asic0_clk.clkr,
+	[GCC_SATA_AXI_CLK] = &gcc_sata_axi_clk.clkr,
+	[GCC_SATA_CFG_AHB_CLK] = &gcc_sata_cfg_ahb_clk.clkr,
+	[GCC_SATA_PMALIVE_CLK] = &gcc_sata_pmalive_clk.clkr,
+	[GCC_SATA_RX_CLK] = &gcc_sata_rx_clk.clkr,
+	[GCC_SATA_RX_OOB_CLK] = &gcc_sata_rx_oob_clk.clkr,
+	[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+	[GCC_SDCC1_CDCCAL_FF_CLK] = &gcc_sdcc1_cdccal_ff_clk.clkr,
+	[GCC_SDCC1_CDCCAL_SLEEP_CLK] = &gcc_sdcc1_cdccal_sleep_clk.clkr,
+	[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+	[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+	[GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+	[GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+	[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+	[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+	[GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr,
+	[GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+	[GCC_SYS_NOC_USB3_SEC_AXI_CLK] = &gcc_sys_noc_usb3_sec_axi_clk.clkr,
+	[GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+	[GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+	[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+	[GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+	[GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+	[GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+	[GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+	[GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+	[GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
+	[GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+	[GCC_UFS_TX_SYMBOL_1_CLK] = &gcc_ufs_tx_symbol_1_clk.clkr,
+	[GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+	[GCC_USB2B_PHY_SLEEP_CLK] = &gcc_usb2b_phy_sleep_clk.clkr,
+	[GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+	[GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+	[GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+	[GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+	[GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+	[GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+	[GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+	[GCC_USB_HS_INACTIVITY_TIMERS_CLK] = &gcc_usb_hs_inactivity_timers_clk.clkr,
+	[GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+	[GCC_USB_HSIC_AHB_CLK] = &gcc_usb_hsic_ahb_clk.clkr,
+	[GCC_USB_HSIC_CLK] = &gcc_usb_hsic_clk.clkr,
+	[GCC_USB_HSIC_IO_CAL_CLK] = &gcc_usb_hsic_io_cal_clk.clkr,
+	[GCC_USB_HSIC_IO_CAL_SLEEP_CLK] = &gcc_usb_hsic_io_cal_sleep_clk.clkr,
+	[GCC_USB_HSIC_MOCK_UTMI_CLK] = &gcc_usb_hsic_mock_utmi_clk.clkr,
+	[GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_apq8084_resets[] = {
+	[GCC_SYSTEM_NOC_BCR] = { 0x0100 },
+	[GCC_CONFIG_NOC_BCR] = { 0x0140 },
+	[GCC_PERIPH_NOC_BCR] = { 0x0180 },
+	[GCC_IMEM_BCR] = { 0x0200 },
+	[GCC_MMSS_BCR] = { 0x0240 },
+	[GCC_QDSS_BCR] = { 0x0300 },
+	[GCC_USB_30_BCR] = { 0x03c0 },
+	[GCC_USB3_PHY_BCR] = { 0x03fc },
+	[GCC_USB_HS_HSIC_BCR] = { 0x0400 },
+	[GCC_USB_HS_BCR] = { 0x0480 },
+	[GCC_USB2A_PHY_BCR] = { 0x04a8 },
+	[GCC_USB2B_PHY_BCR] = { 0x04b0 },
+	[GCC_SDCC1_BCR] = { 0x04c0 },
+	[GCC_SDCC2_BCR] = { 0x0500 },
+	[GCC_SDCC3_BCR] = { 0x0540 },
+	[GCC_SDCC4_BCR] = { 0x0580 },
+	[GCC_BLSP1_BCR] = { 0x05c0 },
+	[GCC_BLSP1_QUP1_BCR] = { 0x0640 },
+	[GCC_BLSP1_UART1_BCR] = { 0x0680 },
+	[GCC_BLSP1_QUP2_BCR] = { 0x06c0 },
+	[GCC_BLSP1_UART2_BCR] = { 0x0700 },
+	[GCC_BLSP1_QUP3_BCR] = { 0x0740 },
+	[GCC_BLSP1_UART3_BCR] = { 0x0780 },
+	[GCC_BLSP1_QUP4_BCR] = { 0x07c0 },
+	[GCC_BLSP1_UART4_BCR] = { 0x0800 },
+	[GCC_BLSP1_QUP5_BCR] = { 0x0840 },
+	[GCC_BLSP1_UART5_BCR] = { 0x0880 },
+	[GCC_BLSP1_QUP6_BCR] = { 0x08c0 },
+	[GCC_BLSP1_UART6_BCR] = { 0x0900 },
+	[GCC_BLSP2_BCR] = { 0x0940 },
+	[GCC_BLSP2_QUP1_BCR] = { 0x0980 },
+	[GCC_BLSP2_UART1_BCR] = { 0x09c0 },
+	[GCC_BLSP2_QUP2_BCR] = { 0x0a00 },
+	[GCC_BLSP2_UART2_BCR] = { 0x0a40 },
+	[GCC_BLSP2_QUP3_BCR] = { 0x0a80 },
+	[GCC_BLSP2_UART3_BCR] = { 0x0ac0 },
+	[GCC_BLSP2_QUP4_BCR] = { 0x0b00 },
+	[GCC_BLSP2_UART4_BCR] = { 0x0b40 },
+	[GCC_BLSP2_QUP5_BCR] = { 0x0b80 },
+	[GCC_BLSP2_UART5_BCR] = { 0x0bc0 },
+	[GCC_BLSP2_QUP6_BCR] = { 0x0c00 },
+	[GCC_BLSP2_UART6_BCR] = { 0x0c40 },
+	[GCC_PDM_BCR] = { 0x0cc0 },
+	[GCC_PRNG_BCR] = { 0x0d00 },
+	[GCC_BAM_DMA_BCR] = { 0x0d40 },
+	[GCC_TSIF_BCR] = { 0x0d80 },
+	[GCC_TCSR_BCR] = { 0x0dc0 },
+	[GCC_BOOT_ROM_BCR] = { 0x0e00 },
+	[GCC_MSG_RAM_BCR] = { 0x0e40 },
+	[GCC_TLMM_BCR] = { 0x0e80 },
+	[GCC_MPM_BCR] = { 0x0ec0 },
+	[GCC_MPM_AHB_RESET] = { 0x0ec4, 1 },
+	[GCC_MPM_NON_AHB_RESET] = { 0x0ec4, 2 },
+	[GCC_SEC_CTRL_BCR] = { 0x0f40 },
+	[GCC_SPMI_BCR] = { 0x0fc0 },
+	[GCC_SPDM_BCR] = { 0x1000 },
+	[GCC_CE1_BCR] = { 0x1040 },
+	[GCC_CE2_BCR] = { 0x1080 },
+	[GCC_BIMC_BCR] = { 0x1100 },
+	[GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x1240 },
+	[GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x1248 },
+	[GCC_PNOC_BUS_TIMEOUT0_BCR] = { 0x1280 },
+	[GCC_PNOC_BUS_TIMEOUT1_BCR] = { 0x1288 },
+	[GCC_PNOC_BUS_TIMEOUT2_BCR] = { 0x1290 },
+	[GCC_PNOC_BUS_TIMEOUT3_BCR] = { 0x1298 },
+	[GCC_PNOC_BUS_TIMEOUT4_BCR] = { 0x12a0 },
+	[GCC_CNOC_BUS_TIMEOUT0_BCR] = { 0x12c0 },
+	[GCC_CNOC_BUS_TIMEOUT1_BCR] = { 0x12c8 },
+	[GCC_CNOC_BUS_TIMEOUT2_BCR] = { 0x12d0 },
+	[GCC_CNOC_BUS_TIMEOUT3_BCR] = { 0x12d8 },
+	[GCC_CNOC_BUS_TIMEOUT4_BCR] = { 0x12e0 },
+	[GCC_CNOC_BUS_TIMEOUT5_BCR] = { 0x12e8 },
+	[GCC_CNOC_BUS_TIMEOUT6_BCR] = { 0x12f0 },
+	[GCC_DEHR_BCR] = { 0x1300 },
+	[GCC_RBCPR_BCR] = { 0x1380 },
+	[GCC_MSS_RESTART] = { 0x1680 },
+	[GCC_LPASS_RESTART] = { 0x16c0 },
+	[GCC_WCSS_RESTART] = { 0x1700 },
+	[GCC_VENUS_RESTART] = { 0x1740 },
+	[GCC_COPSS_SMMU_BCR] = { 0x1a40 },
+	[GCC_SPSS_BCR] = { 0x1a80 },
+	[GCC_PCIE_0_BCR] = { 0x1ac0 },
+	[GCC_PCIE_0_PHY_BCR] = { 0x1b00 },
+	[GCC_PCIE_1_BCR] = { 0x1b40 },
+	[GCC_PCIE_1_PHY_BCR] = { 0x1b80 },
+	[GCC_USB_30_SEC_BCR] = { 0x1bc0 },
+	[GCC_USB3_SEC_PHY_BCR] = { 0x1bfc },
+	[GCC_SATA_BCR] = { 0x1c40 },
+	[GCC_CE3_BCR] = { 0x1d00 },
+	[GCC_UFS_BCR] = { 0x1d40 },
+	[GCC_USB30_PHY_COM_BCR] = { 0x1e80 },
+};
+
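+/*
+ * fast_io selects spinlock-based regmap locking, since clock operations
+ * may run in atomic context.
+ */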
+static const struct regmap_config gcc_apq8084_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x1fc0,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc gcc_apq8084_desc = {
+	.config = &gcc_apq8084_regmap_config,
+	.clks = gcc_apq8084_clocks,
+	.num_clks = ARRAY_SIZE(gcc_apq8084_clocks),
+	.resets = gcc_apq8084_resets,
+	.num_resets = ARRAY_SIZE(gcc_apq8084_resets),
+};
+
+static const struct of_device_id gcc_apq8084_match_table[] = {
+	{ .compatible = "qcom,gcc-apq8084" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_apq8084_match_table);
+
+static int gcc_apq8084_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	struct device *dev = &pdev->dev;
+
+	/* Temporary fixed-rate registration until RPM clocks are supported */
+	clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+				      CLK_IS_ROOT, 32768);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	return qcom_cc_probe(pdev, &gcc_apq8084_desc);
+}
+
+static int gcc_apq8084_remove(struct platform_device *pdev)
+{
+	qcom_cc_remove(pdev);
+	return 0;
+}
+
+static struct platform_driver gcc_apq8084_driver = {
+	.probe		= gcc_apq8084_probe,
+	.remove		= gcc_apq8084_remove,
+	.driver		= {
+		.name	= "gcc-apq8084",
+		.owner	= THIS_MODULE,
+		.of_match_table = gcc_apq8084_match_table,
+	},
+};
+
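+/*
+ * Register at core_initcall so this clock provider is available before
+ * the drivers that consume these clocks begin probing.
+ */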
+static int __init gcc_apq8084_init(void)
+{
+	return platform_driver_register(&gcc_apq8084_driver);
+}
+core_initcall(gcc_apq8084_init);
+
+static void __exit gcc_apq8084_exit(void)
+{
+	platform_driver_unregister(&gcc_apq8084_driver);
+}
+module_exit(gcc_apq8084_exit);
+
+MODULE_DESCRIPTION("QCOM GCC APQ8084 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-apq8084");
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
new file mode 100644
index 0000000..4032e51
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -0,0 +1,2424 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+#include <dt-bindings/reset/qcom,gcc-ipq806x.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_pll pll3 = {
+	.l_reg = 0x3164,
+	.m_reg = 0x3168,
+	.n_reg = 0x316c,
+	.config_reg = 0x3174,
+	.mode_reg = 0x3160,
+	.status_reg = 0x3178,
+	.status_bit = 16,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll3",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_pll pll8 = {
+	.l_reg = 0x3144,
+	.m_reg = 0x3148,
+	.n_reg = 0x314c,
+	.config_reg = 0x3154,
+	.mode_reg = 0x3140,
+	.status_reg = 0x3158,
+	.status_bit = 16,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll8",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
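+/*
+ * The *_vote clocks model this master's enable vote bit in the shared
+ * 0x34c0 register; each has the raw PLL as its only parent.
+ */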
+static struct clk_regmap pll8_vote = {
+	.enable_reg = 0x34c0,
+	.enable_mask = BIT(8),
+	.hw.init = &(struct clk_init_data){
+		.name = "pll8_vote",
+		.parent_names = (const char *[]){ "pll8" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+static struct clk_pll pll14 = {
+	.l_reg = 0x31c4,
+	.m_reg = 0x31c8,
+	.n_reg = 0x31cc,
+	.config_reg = 0x31d4,
+	.mode_reg = 0x31c0,
+	.status_reg = 0x31d8,
+	.status_bit = 16,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll14",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_regmap pll14_vote = {
+	.enable_reg = 0x34c0,
+	.enable_mask = BIT(14),
+	.hw.init = &(struct clk_init_data){
+		.name = "pll14_vote",
+		.parent_names = (const char *[]){ "pll14" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+#define P_PXO	0
+#define P_PLL8	1
+#define P_PLL3	1
+#define P_PLL0	2
+#define P_CXO	2
+
+static const u8 gcc_pxo_pll8_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL8]	= 3,
+};
+
+static const char *gcc_pxo_pll8[] = {
+	"pxo",
+	"pll8_vote",
+};
+
+static const u8 gcc_pxo_pll8_cxo_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL8]	= 3,
+	[P_CXO]		= 5,
+};
+
+static const char *gcc_pxo_pll8_cxo[] = {
+	"pxo",
+	"pll8_vote",
+	"cxo",
+};
+
+static const u8 gcc_pxo_pll3_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL3]	= 1,
+};
+
+static const u8 gcc_pxo_pll3_sata_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL3]	= 6,
+};
+
+static const char *gcc_pxo_pll3[] = {
+	"pxo",
+	"pll3",
+};
+
+static const u8 gcc_pxo_pll8_pll0[] = {
+	[P_PXO]		= 0,
+	[P_PLL8]	= 3,
+	[P_PLL0]	= 2,
+};
+
+static const char *gcc_pxo_pll8_pll0_map[] = {
+	"pxo",
+	"pll8_vote",
+	"pll0",
+};
+
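+/*
+ * Frequency table rows are { rate, src, pre_div, m, n }, giving
+ * rate = parent / pre_div * m / n. With PLL8 at its usual 384 MHz the
+ * first row works out to 384 MHz / 2 * 6 / 625 = 1.8432 MHz.
+ */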
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
+	{  1843200, P_PLL8, 2,  6, 625 },
+	{  3686400, P_PLL8, 2, 12, 625 },
+	{  7372800, P_PLL8, 2, 24, 625 },
+	{ 14745600, P_PLL8, 2, 48, 625 },
+	{ 16000000, P_PLL8, 4,  1,   6 },
+	{ 24000000, P_PLL8, 4,  1,   4 },
+	{ 32000000, P_PLL8, 4,  1,   3 },
+	{ 40000000, P_PLL8, 1,  5,  48 },
+	{ 46400000, P_PLL8, 1, 29, 240 },
+	{ 48000000, P_PLL8, 4,  1,   2 },
+	{ 51200000, P_PLL8, 1,  2,  15 },
+	{ 56000000, P_PLL8, 1,  7,  48 },
+	{ 58982400, P_PLL8, 1, 96, 625 },
+	{ 64000000, P_PLL8, 2,  1,   3 },
+	{ }
+};
+
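+/*
+ * Each RCG entry describes the bit layout of its NS/MD registers: mn is
+ * the M/N counter, p the pre-divider and s the source-select mux.
+ */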
+static struct clk_rcg gsbi1_uart_src = {
+	.ns_reg = 0x29d4,
+	.md_reg = 0x29d0,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_uart,
+	.clkr = {
+		.enable_reg = 0x29d4,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi1_uart_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi1_uart_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x29d4,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi1_uart_clk",
+			.parent_names = (const char *[]){
+				"gsbi1_uart_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+	.ns_reg = 0x29f4,
+	.md_reg = 0x29f0,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_uart,
+	.clkr = {
+		.enable_reg = 0x29f4,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi2_uart_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 8,
+	.clkr = {
+		.enable_reg = 0x29f4,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi2_uart_clk",
+			.parent_names = (const char *[]){
+				"gsbi2_uart_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+	.ns_reg = 0x2a34,
+	.md_reg = 0x2a30,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_uart,
+	.clkr = {
+		.enable_reg = 0x2a34,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi4_uart_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 26,
+	.clkr = {
+		.enable_reg = 0x2a34,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi4_uart_clk",
+			.parent_names = (const char *[]){
+				"gsbi4_uart_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+	.ns_reg = 0x2a54,
+	.md_reg = 0x2a50,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_uart,
+	.clkr = {
+		.enable_reg = 0x2a54,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi5_uart_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 22,
+	.clkr = {
+		.enable_reg = 0x2a54,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi5_uart_clk",
+			.parent_names = (const char *[]){
+				"gsbi5_uart_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi6_uart_src = {
+	.ns_reg = 0x2a74,
+	.md_reg = 0x2a70,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_uart,
+	.clkr = {
+		.enable_reg = 0x2a74,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi6_uart_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi6_uart_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 18,
+	.clkr = {
+		.enable_reg = 0x2a74,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi6_uart_clk",
+			.parent_names = (const char *[]){
+				"gsbi6_uart_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi7_uart_src = {
+	.ns_reg = 0x2a94,
+	.md_reg = 0x2a90,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_uart,
+	.clkr = {
+		.enable_reg = 0x2a94,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi7_uart_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi7_uart_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 14,
+	.clkr = {
+		.enable_reg = 0x2a94,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi7_uart_clk",
+			.parent_names = (const char *[]){
+				"gsbi7_uart_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
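+/*
+ * Rows with m/n of 0 bypass the M/N counter entirely; the 27 MHz entry
+ * is PXO passed straight through a pre-divider of 1.
+ */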
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
+	{  1100000, P_PXO,  1, 2, 49 },
+	{  5400000, P_PXO,  1, 1,  5 },
+	{ 10800000, P_PXO,  1, 2,  5 },
+	{ 15060000, P_PLL8, 1, 2, 51 },
+	{ 24000000, P_PLL8, 4, 1,  4 },
+	{ 25600000, P_PLL8, 1, 1, 15 },
+	{ 27000000, P_PXO,  1, 0,  0 },
+	{ 48000000, P_PLL8, 4, 1,  2 },
+	{ 51200000, P_PLL8, 1, 2, 15 },
+	{ }
+};
+
+static struct clk_rcg gsbi1_qup_src = {
+	.ns_reg = 0x29cc,
+	.md_reg = 0x29c8,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_qup,
+	.clkr = {
+		.enable_reg = 0x29cc,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi1_qup_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 11,
+	.clkr = {
+		.enable_reg = 0x29cc,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi1_qup_clk",
+			.parent_names = (const char *[]){ "gsbi1_qup_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+	.ns_reg = 0x29ec,
+	.md_reg = 0x29e8,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_qup,
+	.clkr = {
+		.enable_reg = 0x29ec,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi2_qup_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 6,
+	.clkr = {
+		.enable_reg = 0x29ec,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi2_qup_clk",
+			.parent_names = (const char *[]){ "gsbi2_qup_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+	.ns_reg = 0x2a2c,
+	.md_reg = 0x2a28,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_qup,
+	.clkr = {
+		.enable_reg = 0x2a2c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi4_qup_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 24,
+	.clkr = {
+		.enable_reg = 0x2a2c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi4_qup_clk",
+			.parent_names = (const char *[]){ "gsbi4_qup_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+	.ns_reg = 0x2a4c,
+	.md_reg = 0x2a48,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_qup,
+	.clkr = {
+		.enable_reg = 0x2a4c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi5_qup_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 20,
+	.clkr = {
+		.enable_reg = 0x2a4c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi5_qup_clk",
+			.parent_names = (const char *[]){ "gsbi5_qup_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi6_qup_src = {
+	.ns_reg = 0x2a6c,
+	.md_reg = 0x2a68,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_qup,
+	.clkr = {
+		.enable_reg = 0x2a6c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi6_qup_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi6_qup_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 16,
+	.clkr = {
+		.enable_reg = 0x2a6c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi6_qup_clk",
+			.parent_names = (const char *[]){ "gsbi6_qup_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gsbi7_qup_src = {
+	.ns_reg = 0x2a8c,
+	.md_reg = 0x2a88,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_gsbi_qup,
+	.clkr = {
+		.enable_reg = 0x2a8c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi7_qup_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	},
+};
+
+static struct clk_branch gsbi7_qup_clk = {
+	.halt_reg = 0x2fd0,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x2a8c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi7_qup_clk",
+			.parent_names = (const char *[]){ "gsbi7_qup_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
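+/*
+ * AHB (h) clocks for the GSBI blocks; hwcg_reg/hwcg_bit locate the
+ * hardware auto clock-gating control for each branch.
+ */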
+static struct clk_branch gsbi1_h_clk = {
+	.hwcg_reg = 0x29c0,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fcc,
+	.halt_bit = 13,
+	.clkr = {
+		.enable_reg = 0x29c0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi1_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch gsbi2_h_clk = {
+	.hwcg_reg = 0x29e0,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fcc,
+	.halt_bit = 9,
+	.clkr = {
+		.enable_reg = 0x29e0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi2_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch gsbi4_h_clk = {
+	.hwcg_reg = 0x2a20,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fd0,
+	.halt_bit = 27,
+	.clkr = {
+		.enable_reg = 0x2a20,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi4_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch gsbi5_h_clk = {
+	.hwcg_reg = 0x2a40,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fd0,
+	.halt_bit = 23,
+	.clkr = {
+		.enable_reg = 0x2a40,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi5_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch gsbi6_h_clk = {
+	.hwcg_reg = 0x2a60,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fd0,
+	.halt_bit = 19,
+	.clkr = {
+		.enable_reg = 0x2a60,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi6_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch gsbi7_h_clk = {
+	.hwcg_reg = 0x2a80,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fd0,
+	.halt_bit = 15,
+	.clkr = {
+		.enable_reg = 0x2a80,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gsbi7_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_gp[] = {
+	{ 12500000, P_PXO,  2, 0, 0 },
+	{ 25000000, P_PXO,  1, 0, 0 },
+	{ 64000000, P_PLL8, 2, 1, 3 },
+	{ 76800000, P_PLL8, 1, 1, 5 },
+	{ 96000000, P_PLL8, 4, 0, 0 },
+	{ 128000000, P_PLL8, 3, 0, 0 },
+	{ 192000000, P_PLL8, 2, 0, 0 },
+	{ }
+};
+
+static struct clk_rcg gp0_src = {
+	.ns_reg = 0x2d24,
+	.md_reg = 0x2d00,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_cxo_map,
+	},
+	.freq_tbl = clk_tbl_gp,
+	.clkr = {
+		.enable_reg = 0x2d24,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gp0_src",
+			.parent_names = gcc_pxo_pll8_cxo,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_PARENT_GATE,
+		},
+	}
+};
+
+static struct clk_branch gp0_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_bit = 7,
+	.clkr = {
+		.enable_reg = 0x2d24,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gp0_clk",
+			.parent_names = (const char *[]){ "gp0_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gp1_src = {
+	.ns_reg = 0x2d44,
+	.md_reg = 0x2d40,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_cxo_map,
+	},
+	.freq_tbl = clk_tbl_gp,
+	.clkr = {
+		.enable_reg = 0x2d44,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gp1_src",
+			.parent_names = gcc_pxo_pll8_cxo,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch gp1_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_bit = 6,
+	.clkr = {
+		.enable_reg = 0x2d44,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gp1_clk",
+			.parent_names = (const char *[]){ "gp1_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg gp2_src = {
+	.ns_reg = 0x2d64,
+	.md_reg = 0x2d60,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_cxo_map,
+	},
+	.freq_tbl = clk_tbl_gp,
+	.clkr = {
+		.enable_reg = 0x2d64,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gp2_src",
+			.parent_names = gcc_pxo_pll8_cxo,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch gp2_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_bit = 5,
+	.clkr = {
+		.enable_reg = 0x2d64,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gp2_clk",
+			.parent_names = (const char *[]){ "gp2_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch pmem_clk = {
+	.hwcg_reg = 0x25a0,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fc8,
+	.halt_bit = 20,
+	.clkr = {
+		.enable_reg = 0x25a0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pmem_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_rcg prng_src = {
+	.ns_reg = 0x2e80,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "prng_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+		},
+	},
+};
+
+static struct clk_branch prng_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 10,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "prng_clk",
+			.parent_names = (const char *[]){ "prng_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+	{    144000, P_PXO,   5, 18, 625 },
+	{    400000, P_PLL8,  4, 1, 240 },
+	{  16000000, P_PLL8,  4, 1,   6 },
+	{  17070000, P_PLL8,  1, 2,  45 },
+	{  20210000, P_PLL8,  1, 1,  19 },
+	{  24000000, P_PLL8,  4, 1,   4 },
+	{  48000000, P_PLL8,  4, 1,   2 },
+	{  64000000, P_PLL8,  3, 1,   2 },
+	{  96000000, P_PLL8,  4, 0,   0 },
+	{ 192000000, P_PLL8,  2, 0,   0 },
+	{ }
+};
+
+static struct clk_rcg sdc1_src = {
+	.ns_reg = 0x282c,
+	.md_reg = 0x2828,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_sdc,
+	.clkr = {
+		.enable_reg = 0x282c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "sdc1_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch sdc1_clk = {
+	.halt_reg = 0x2fc8,
+	.halt_bit = 6,
+	.clkr = {
+		.enable_reg = 0x282c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "sdc1_clk",
+			.parent_names = (const char *[]){ "sdc1_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg sdc3_src = {
+	.ns_reg = 0x286c,
+	.md_reg = 0x2868,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_sdc,
+	.clkr = {
+		.enable_reg = 0x286c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "sdc3_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch sdc3_clk = {
+	.halt_reg = 0x2fc8,
+	.halt_bit = 4,
+	.clkr = {
+		.enable_reg = 0x286c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "sdc3_clk",
+			.parent_names = (const char *[]){ "sdc3_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch sdc1_h_clk = {
+	.hwcg_reg = 0x2820,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fc8,
+	.halt_bit = 11,
+	.clkr = {
+		.enable_reg = 0x2820,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sdc1_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sdc3_h_clk = {
+	.hwcg_reg = 0x2860,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fc8,
+	.halt_bit = 9,
+	.clkr = {
+		.enable_reg = 0x2860,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sdc3_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_tsif_ref[] = {
+	{ 105000, P_PXO,  1, 1, 256 },
+	{ }
+};
+
+static struct clk_rcg tsif_ref_src = {
+	.ns_reg = 0x2710,
+	.md_reg = 0x270c,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_tsif_ref,
+	.clkr = {
+		.enable_reg = 0x2710,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "tsif_ref_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch tsif_ref_clk = {
+	.halt_reg = 0x2fd4,
+	.halt_bit = 5,
+	.clkr = {
+		.enable_reg = 0x2710,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "tsif_ref_clk",
+			.parent_names = (const char *[]){ "tsif_ref_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch tsif_h_clk = {
+	.hwcg_reg = 0x2700,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fd4,
+	.halt_bit = 7,
+	.clkr = {
+		.enable_reg = 0x2700,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "tsif_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch dma_bam_h_clk = {
+	.hwcg_reg = 0x25c0,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fc8,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x25c0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "dma_bam_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch adm0_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "adm0_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch adm0_pbus_clk = {
+	.hwcg_reg = 0x2208,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fdc,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 11,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(3),
+		.hw.init = &(struct clk_init_data){
+			.name = "adm0_pbus_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 22,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "pmic_arb0_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 21,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "pmic_arb1_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+	.halt_reg = 0x2fd8,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 23,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "pmic_ssbi2_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+	.hwcg_reg = 0x27e0,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fd8,
+	.halt_check = BRANCH_HALT_VOTED,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x3080,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "rpm_msg_ram_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_pcie_ref[] = {
+	{ 100000000, P_PLL3,  12, 0, 0 },
+	{ }
+};
+
+static struct clk_rcg pcie_ref_src = {
+	.ns_reg = 0x3860,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll3_map,
+	},
+	.freq_tbl = clk_tbl_pcie_ref,
+	.clkr = {
+		.enable_reg = 0x3860,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_ref_src",
+			.parent_names = gcc_pxo_pll3,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch pcie_ref_src_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 30,
+	.clkr = {
+		.enable_reg = 0x3860,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_ref_src_clk",
+			.parent_names = (const char *[]){ "pcie_ref_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch pcie_a_clk = {
+	.halt_reg = 0x2fc0,
+	.halt_bit = 13,
+	.clkr = {
+		.enable_reg = 0x22c0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_a_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie_aux_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 31,
+	.clkr = {
+		.enable_reg = 0x22c8,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_aux_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie_h_clk = {
+	.halt_reg = 0x2fd4,
+	.halt_bit = 8,
+	.clkr = {
+		.enable_reg = 0x22cc,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie_phy_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 29,
+	.clkr = {
+		.enable_reg = 0x22d0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_phy_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_rcg pcie1_ref_src = {
+	.ns_reg = 0x3aa0,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll3_map,
+	},
+	.freq_tbl = clk_tbl_pcie_ref,
+	.clkr = {
+		.enable_reg = 0x3aa0,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie1_ref_src",
+			.parent_names = gcc_pxo_pll3,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch pcie1_ref_src_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 27,
+	.clkr = {
+		.enable_reg = 0x3aa0,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie1_ref_src_clk",
+			.parent_names = (const char *[]){ "pcie1_ref_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch pcie1_a_clk = {
+	.halt_reg = 0x2fc0,
+	.halt_bit = 10,
+	.clkr = {
+		.enable_reg = 0x3a80,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie1_a_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie1_aux_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 28,
+	.clkr = {
+		.enable_reg = 0x3a88,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie1_aux_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie1_h_clk = {
+	.halt_reg = 0x2fd4,
+	.halt_bit = 9,
+	.clkr = {
+		.enable_reg = 0x3a8c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie1_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie1_phy_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 26,
+	.clkr = {
+		.enable_reg = 0x3a90,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie1_phy_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_rcg pcie2_ref_src = {
+	.ns_reg = 0x3ae0,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll3_map,
+	},
+	.freq_tbl = clk_tbl_pcie_ref,
+	.clkr = {
+		.enable_reg = 0x3ae0,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie2_ref_src",
+			.parent_names = gcc_pxo_pll3,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch pcie2_ref_src_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 24,
+	.clkr = {
+		.enable_reg = 0x3ae0,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie2_ref_src_clk",
+			.parent_names = (const char *[]){ "pcie2_ref_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch pcie2_a_clk = {
+	.halt_reg = 0x2fc0,
+	.halt_bit = 9,
+	.clkr = {
+		.enable_reg = 0x3ac0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie2_a_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie2_aux_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 25,
+	.clkr = {
+		.enable_reg = 0x3ac8,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie2_aux_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie2_h_clk = {
+	.halt_reg = 0x2fd4,
+	.halt_bit = 10,
+	.clkr = {
+		.enable_reg = 0x3acc,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie2_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie2_phy_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 23,
+	.clkr = {
+		.enable_reg = 0x3ad0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie2_phy_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_sata_ref[] = {
+	{ 100000000, P_PLL3,  12, 0, 0 },
+	{ }
+};
+
+static struct clk_rcg sata_ref_src = {
+	.ns_reg = 0x2c08,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll3_sata_map,
+	},
+	.freq_tbl = clk_tbl_sata_ref,
+	.clkr = {
+		.enable_reg = 0x2c08,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_ref_src",
+			.parent_names = gcc_pxo_pll3,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch sata_rxoob_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 20,
+	.clkr = {
+		.enable_reg = 0x2c0c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_rxoob_clk",
+			.parent_names = (const char *[]){ "sata_ref_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch sata_pmalive_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 19,
+	.clkr = {
+		.enable_reg = 0x2c10,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_pmalive_clk",
+			.parent_names = (const char *[]){ "sata_ref_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch sata_phy_ref_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 18,
+	.clkr = {
+		.enable_reg = 0x2c14,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_phy_ref_clk",
+			.parent_names = (const char *[]){ "pxo" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+		},
+	},
+};
+
+static struct clk_branch sata_a_clk = {
+	.halt_reg = 0x2fc0,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x2c20,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_a_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sata_h_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 21,
+	.clkr = {
+		.enable_reg = 0x2c00,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sfab_sata_s_h_clk = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 14,
+	.clkr = {
+		.enable_reg = 0x2480,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sfab_sata_s_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sata_phy_cfg_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 14,
+	.clkr = {
+		.enable_reg = 0x2c40,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_phy_cfg_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
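+/*
+ * Rate check for the row below, assuming the usual 800 MHz pll0_vote
+ * parent on this SoC: 800000000 / 1 * 5 / 32 = 125 MHz.
+ */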
+static const struct freq_tbl clk_tbl_usb30_master[] = {
+	{ 125000000, P_PLL0,  1, 5, 32 },
+	{ }
+};
+
+static struct clk_rcg usb30_master_clk_src = {
+	.ns_reg = 0x3b2c,
+	.md_reg = 0x3b28,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll0,
+	},
+	.freq_tbl = clk_tbl_usb30_master,
+	.clkr = {
+		.enable_reg = 0x3b2c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb30_master_ref_src",
+			.parent_names = gcc_pxo_pll8_pll0_map,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch usb30_0_branch_clk = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 22,
+	.clkr = {
+		.enable_reg = 0x3b24,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb30_0_branch_clk",
+			.parent_names = (const char *[]){ "usb30_master_ref_src", },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch usb30_1_branch_clk = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 17,
+	.clkr = {
+		.enable_reg = 0x3b34,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb30_1_branch_clk",
+			.parent_names = (const char *[]){ "usb30_master_ref_src", },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_usb30_utmi[] = {
+	{ 60000000, P_PLL8,  1, 5, 32 },
+	{ }
+};
+
+static struct clk_rcg usb30_utmi_clk = {
+	.ns_reg = 0x3b44,
+	.md_reg = 0x3b40,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll0,
+	},
+	.freq_tbl = clk_tbl_usb30_utmi,
+	.clkr = {
+		.enable_reg = 0x3b44,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb30_utmi_clk",
+			.parent_names = gcc_pxo_pll8_pll0_map,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch usb30_0_utmi_clk_ctl = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 21,
+	.clkr = {
+		.enable_reg = 0x3b48,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb30_0_utmi_clk_ctl",
+			.parent_names = (const char *[]){ "usb30_utmi_clk", },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch usb30_1_utmi_clk_ctl = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 15,
+	.clkr = {
+		.enable_reg = 0x3b4c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb30_1_utmi_clk_ctl",
+			.parent_names = (const char *[]){ "usb30_utmi_clk", },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+	{ 60000000, P_PLL8,  1, 5, 32 },
+	{ }
+};
+
+static struct clk_rcg usb_hs1_xcvr_clk_src = {
+	.ns_reg = 0x290c,
+	.md_reg = 0x2908,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll0,
+	},
+	.freq_tbl = clk_tbl_usb,
+	.clkr = {
+		.enable_reg = 0x290c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs1_xcvr_src",
+			.parent_names = gcc_pxo_pll8_pll0_map,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 17,
+	.clkr = {
+		.enable_reg = 0x290c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs1_xcvr_clk",
+			.parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+	.hwcg_reg = 0x2900,
+	.hwcg_bit = 6,
+	.halt_reg = 0x2fc8,
+	.halt_bit = 1,
+	.clkr = {
+		.enable_reg = 0x2900,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs1_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_rcg usb_fs1_xcvr_clk_src = {
+	.ns_reg = 0x2968,
+	.md_reg = 0x2964,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll0,
+	},
+	.freq_tbl = clk_tbl_usb,
+	.clkr = {
+		.enable_reg = 0x2968,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_fs1_xcvr_src",
+			.parent_names = gcc_pxo_pll8_pll0_map,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch usb_fs1_xcvr_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 17,
+	.clkr = {
+		.enable_reg = 0x2968,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_fs1_xcvr_clk",
+			.parent_names = (const char *[]){ "usb_fs1_xcvr_src", },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch usb_fs1_sys_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 18,
+	.clkr = {
+		.enable_reg = 0x296c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_fs1_sys_clk",
+			.parent_names = (const char *[]){ "usb_fs1_xcvr_src", },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch usb_fs1_h_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 19,
+	.clkr = {
+		.enable_reg = 0x2960,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_fs1_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_regmap *gcc_ipq806x_clks[] = {
+	[PLL3] = &pll3.clkr,
+	[PLL8] = &pll8.clkr,
+	[PLL8_VOTE] = &pll8_vote,
+	[PLL14] = &pll14.clkr,
+	[PLL14_VOTE] = &pll14_vote,
+	[GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+	[GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+	[GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+	[GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+	[GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+	[GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+	[GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+	[GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+	[GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+	[GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
+	[GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
+	[GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+	[GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+	[GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+	[GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+	[GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+	[GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+	[GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+	[GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+	[GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+	[GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+	[GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
+	[GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
+	[GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+	[GP0_SRC] = &gp0_src.clkr,
+	[GP0_CLK] = &gp0_clk.clkr,
+	[GP1_SRC] = &gp1_src.clkr,
+	[GP1_CLK] = &gp1_clk.clkr,
+	[GP2_SRC] = &gp2_src.clkr,
+	[GP2_CLK] = &gp2_clk.clkr,
+	[PMEM_A_CLK] = &pmem_clk.clkr,
+	[PRNG_SRC] = &prng_src.clkr,
+	[PRNG_CLK] = &prng_clk.clkr,
+	[SDC1_SRC] = &sdc1_src.clkr,
+	[SDC1_CLK] = &sdc1_clk.clkr,
+	[SDC3_SRC] = &sdc3_src.clkr,
+	[SDC3_CLK] = &sdc3_clk.clkr,
+	[TSIF_REF_SRC] = &tsif_ref_src.clkr,
+	[TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+	[DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+	[GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+	[GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+	[GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+	[GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+	[GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
+	[GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+	[TSIF_H_CLK] = &tsif_h_clk.clkr,
+	[SDC1_H_CLK] = &sdc1_h_clk.clkr,
+	[SDC3_H_CLK] = &sdc3_h_clk.clkr,
+	[ADM0_CLK] = &adm0_clk.clkr,
+	[ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+	[PCIE_A_CLK] = &pcie_a_clk.clkr,
+	[PCIE_AUX_CLK] = &pcie_aux_clk.clkr,
+	[PCIE_H_CLK] = &pcie_h_clk.clkr,
+	[PCIE_PHY_CLK] = &pcie_phy_clk.clkr,
+	[SFAB_SATA_S_H_CLK] = &sfab_sata_s_h_clk.clkr,
+	[PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+	[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+	[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+	[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+	[SATA_H_CLK] = &sata_h_clk.clkr,
+	[SATA_CLK_SRC] = &sata_ref_src.clkr,
+	[SATA_RXOOB_CLK] = &sata_rxoob_clk.clkr,
+	[SATA_PMALIVE_CLK] = &sata_pmalive_clk.clkr,
+	[SATA_PHY_REF_CLK] = &sata_phy_ref_clk.clkr,
+	[SATA_A_CLK] = &sata_a_clk.clkr,
+	[SATA_PHY_CFG_CLK] = &sata_phy_cfg_clk.clkr,
+	[PCIE_ALT_REF_SRC] = &pcie_ref_src.clkr,
+	[PCIE_ALT_REF_CLK] = &pcie_ref_src_clk.clkr,
+	[PCIE_1_A_CLK] = &pcie1_a_clk.clkr,
+	[PCIE_1_AUX_CLK] = &pcie1_aux_clk.clkr,
+	[PCIE_1_H_CLK] = &pcie1_h_clk.clkr,
+	[PCIE_1_PHY_CLK] = &pcie1_phy_clk.clkr,
+	[PCIE_1_ALT_REF_SRC] = &pcie1_ref_src.clkr,
+	[PCIE_1_ALT_REF_CLK] = &pcie1_ref_src_clk.clkr,
+	[PCIE_2_A_CLK] = &pcie2_a_clk.clkr,
+	[PCIE_2_AUX_CLK] = &pcie2_aux_clk.clkr,
+	[PCIE_2_H_CLK] = &pcie2_h_clk.clkr,
+	[PCIE_2_PHY_CLK] = &pcie2_phy_clk.clkr,
+	[PCIE_2_ALT_REF_SRC] = &pcie2_ref_src.clkr,
+	[PCIE_2_ALT_REF_CLK] = &pcie2_ref_src_clk.clkr,
+	[USB30_MASTER_SRC] = &usb30_master_clk_src.clkr,
+	[USB30_0_MASTER_CLK] = &usb30_0_branch_clk.clkr,
+	[USB30_1_MASTER_CLK] = &usb30_1_branch_clk.clkr,
+	[USB30_UTMI_SRC] = &usb30_utmi_clk.clkr,
+	[USB30_0_UTMI_CLK] = &usb30_0_utmi_clk_ctl.clkr,
+	[USB30_1_UTMI_CLK] = &usb30_1_utmi_clk_ctl.clkr,
+	[USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+	[USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_clk_src.clkr,
+	[USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+	[USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+	[USB_FS1_XCVR_SRC] = &usb_fs1_xcvr_clk_src.clkr,
+	[USB_FS1_XCVR_CLK] = &usb_fs1_xcvr_clk.clkr,
+	[USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
+};
+
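+/*
+ * Reset map: each entry is { register offset, bit }.  The reset controller
+ * asserts a block reset by setting that bit and deasserts it by clearing
+ * the bit again.
+ */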
+static const struct qcom_reset_map gcc_ipq806x_resets[] = {
+	[QDSS_STM_RESET] = { 0x2060, 6 },
+	[AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+	[AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+	[AFAB_SMPSS_M0_RESET] = { 0x20b8, 0 },
+	[AFAB_EBI1_CH0_RESET] = { 0x20c0, 7 },
+	[AFAB_EBI1_CH1_RESET] = { 0x20c4, 7 },
+	[SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+	[SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+	[SFAB_ADM0_M2_RESET] = { 0x21e8, 7 },
+	[ADM0_C2_RESET] = { 0x220c, 4 },
+	[ADM0_C1_RESET] = { 0x220c, 3 },
+	[ADM0_C0_RESET] = { 0x220c, 2 },
+	[ADM0_PBUS_RESET] = { 0x220c, 1 },
+	[ADM0_RESET] = { 0x220c, 0 },
+	[QDSS_CLKS_SW_RESET] = { 0x2260, 5 },
+	[QDSS_POR_RESET] = { 0x2260, 4 },
+	[QDSS_TSCTR_RESET] = { 0x2260, 3 },
+	[QDSS_HRESET_RESET] = { 0x2260, 2 },
+	[QDSS_AXI_RESET] = { 0x2260, 1 },
+	[QDSS_DBG_RESET] = { 0x2260, 0 },
+	[SFAB_PCIE_M_RESET] = { 0x22d8, 1 },
+	[SFAB_PCIE_S_RESET] = { 0x22d8, 0 },
+	[PCIE_EXT_RESET] = { 0x22dc, 6 },
+	[PCIE_PHY_RESET] = { 0x22dc, 5 },
+	[PCIE_PCI_RESET] = { 0x22dc, 4 },
+	[PCIE_POR_RESET] = { 0x22dc, 3 },
+	[PCIE_HCLK_RESET] = { 0x22dc, 2 },
+	[PCIE_ACLK_RESET] = { 0x22dc, 0 },
+	[SFAB_LPASS_RESET] = { 0x23a0, 7 },
+	[SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+	[AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+	[AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+	[SFAB_SATA_S_RESET] = { 0x2480, 7 },
+	[SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+	[DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+	[DFAB_SWAY0_RESET] = { 0x2540, 7 },
+	[DFAB_SWAY1_RESET] = { 0x2544, 7 },
+	[DFAB_ARB0_RESET] = { 0x2560, 7 },
+	[DFAB_ARB1_RESET] = { 0x2564, 7 },
+	[PPSS_PROC_RESET] = { 0x2594, 1 },
+	[PPSS_RESET] = { 0x2594, 0 },
+	[DMA_BAM_RESET] = { 0x25c0, 7 },
+	[SPS_TIC_H_RESET] = { 0x2600, 7 },
+	[SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+	[SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+	[TSIF_H_RESET] = { 0x2700, 7 },
+	[CE1_H_RESET] = { 0x2720, 7 },
+	[CE1_CORE_RESET] = { 0x2724, 7 },
+	[CE1_SLEEP_RESET] = { 0x2728, 7 },
+	[CE2_H_RESET] = { 0x2740, 7 },
+	[CE2_CORE_RESET] = { 0x2744, 7 },
+	[SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+	[SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+	[RPM_PROC_RESET] = { 0x27c0, 7 },
+	[PMIC_SSBI2_RESET] = { 0x280c, 12 },
+	[SDC1_RESET] = { 0x2830, 0 },
+	[SDC2_RESET] = { 0x2850, 0 },
+	[SDC3_RESET] = { 0x2870, 0 },
+	[SDC4_RESET] = { 0x2890, 0 },
+	[USB_HS1_RESET] = { 0x2910, 0 },
+	[USB_HSIC_RESET] = { 0x2934, 0 },
+	[USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+	[USB_FS1_RESET] = { 0x2974, 0 },
+	[GSBI1_RESET] = { 0x29dc, 0 },
+	[GSBI2_RESET] = { 0x29fc, 0 },
+	[GSBI3_RESET] = { 0x2a1c, 0 },
+	[GSBI4_RESET] = { 0x2a3c, 0 },
+	[GSBI5_RESET] = { 0x2a5c, 0 },
+	[GSBI6_RESET] = { 0x2a7c, 0 },
+	[GSBI7_RESET] = { 0x2a9c, 0 },
+	[SPDM_RESET] = { 0x2b6c, 0 },
+	[SEC_CTRL_RESET] = { 0x2b80, 7 },
+	[TLMM_H_RESET] = { 0x2ba0, 7 },
+	[SFAB_SATA_M_RESET] = { 0x2c18, 0 },
+	[SATA_RESET] = { 0x2c1c, 0 },
+	[TSSC_RESET] = { 0x2ca0, 7 },
+	[PDM_RESET] = { 0x2cc0, 12 },
+	[MPM_H_RESET] = { 0x2da0, 7 },
+	[MPM_RESET] = { 0x2da4, 0 },
+	[SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+	[PRNG_RESET] = { 0x2e80, 12 },
+	[SFAB_CE3_M_RESET] = { 0x36c8, 1 },
+	[SFAB_CE3_S_RESET] = { 0x36c8, 0 },
+	[CE3_SLEEP_RESET] = { 0x36d0, 7 },
+	[PCIE_1_M_RESET] = { 0x3a98, 1 },
+	[PCIE_1_S_RESET] = { 0x3a98, 0 },
+	[PCIE_1_EXT_RESET] = { 0x3a9c, 6 },
+	[PCIE_1_PHY_RESET] = { 0x3a9c, 5 },
+	[PCIE_1_PCI_RESET] = { 0x3a9c, 4 },
+	[PCIE_1_POR_RESET] = { 0x3a9c, 3 },
+	[PCIE_1_HCLK_RESET] = { 0x3a9c, 2 },
+	[PCIE_1_ACLK_RESET] = { 0x3a9c, 0 },
+	[PCIE_2_M_RESET] = { 0x3ad8, 1 },
+	[PCIE_2_S_RESET] = { 0x3ad8, 0 },
+	[PCIE_2_EXT_RESET] = { 0x3adc, 6 },
+	[PCIE_2_PHY_RESET] = { 0x3adc, 5 },
+	[PCIE_2_PCI_RESET] = { 0x3adc, 4 },
+	[PCIE_2_POR_RESET] = { 0x3adc, 3 },
+	[PCIE_2_HCLK_RESET] = { 0x3adc, 2 },
+	[PCIE_2_ACLK_RESET] = { 0x3adc, 0 },
+	[SFAB_USB30_S_RESET] = { 0x3b54, 1 },
+	[SFAB_USB30_M_RESET] = { 0x3b54, 0 },
+	[USB30_0_PORT2_HS_PHY_RESET] = { 0x3b50, 5 },
+	[USB30_0_MASTER_RESET] = { 0x3b50, 4 },
+	[USB30_0_SLEEP_RESET] = { 0x3b50, 3 },
+	[USB30_0_UTMI_PHY_RESET] = { 0x3b50, 2 },
+	[USB30_0_POWERON_RESET] = { 0x3b50, 1 },
+	[USB30_0_PHY_RESET] = { 0x3b50, 0 },
+	[USB30_1_MASTER_RESET] = { 0x3b58, 4 },
+	[USB30_1_SLEEP_RESET] = { 0x3b58, 3 },
+	[USB30_1_UTMI_PHY_RESET] = { 0x3b58, 2 },
+	[USB30_1_POWERON_RESET] = { 0x3b58, 1 },
+	[USB30_1_PHY_RESET] = { 0x3b58, 0 },
+	[NSSFB0_RESET] = { 0x3b60, 6 },
+	[NSSFB1_RESET] = { 0x3b60, 7 },
+};
+
+static const struct regmap_config gcc_ipq806x_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x3e40,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc gcc_ipq806x_desc = {
+	.config = &gcc_ipq806x_regmap_config,
+	.clks = gcc_ipq806x_clks,
+	.num_clks = ARRAY_SIZE(gcc_ipq806x_clks),
+	.resets = gcc_ipq806x_resets,
+	.num_resets = ARRAY_SIZE(gcc_ipq806x_resets),
+};
+
+static const struct of_device_id gcc_ipq806x_match_table[] = {
+	{ .compatible = "qcom,gcc-ipq8064" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq806x_match_table);
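+
+/*
+ * Illustrative device-tree consumer of the compatible above; the register
+ * base and cell counts are assumptions modelled on other qcom GCC nodes,
+ * not taken from this patch:
+ *
+ *	gcc: clock-controller@900000 {
+ *		compatible = "qcom,gcc-ipq8064";
+ *		reg = <0x00900000 0x4000>;
+ *		#clock-cells = <1>;
+ *		#reset-cells = <1>;
+ *	};
+ */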
+
+static int gcc_ipq806x_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	struct device *dev = &pdev->dev;
+
+	/* Temporary until RPM clocks supported */
+	clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 25000000);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	return qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+}
+
+static int gcc_ipq806x_remove(struct platform_device *pdev)
+{
+	qcom_cc_remove(pdev);
+	return 0;
+}
+
+static struct platform_driver gcc_ipq806x_driver = {
+	.probe		= gcc_ipq806x_probe,
+	.remove		= gcc_ipq806x_remove,
+	.driver		= {
+		.name	= "gcc-ipq806x",
+		.owner	= THIS_MODULE,
+		.of_match_table = gcc_ipq806x_match_table,
+	},
+};
+
+static int __init gcc_ipq806x_init(void)
+{
+	return platform_driver_register(&gcc_ipq806x_driver);
+}
+core_initcall(gcc_ipq806x_init);
+
+static void __exit gcc_ipq806x_exit(void)
+{
+	platform_driver_unregister(&gcc_ipq806x_driver);
+}
+module_exit(gcc_ipq806x_exit);
+
+MODULE_DESCRIPTION("QCOM GCC IPQ806x Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-ipq806x");
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f4ffd91..007534f 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -104,6 +104,7 @@
 
 #define P_PXO	0
 #define P_PLL8	1
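+/* P_PLL3 reuses index 2: no parent map in this file uses both it and P_CXO */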
+#define P_PLL3	2
 #define P_CXO	2
 
 static const u8 gcc_pxo_pll8_map[] = {
@@ -128,6 +129,18 @@
 	"cxo",
 };
 
+static const u8 gcc_pxo_pll8_pll3_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL8]	= 3,
+	[P_PLL3]	= 6,
+};
+
+static const char *gcc_pxo_pll8_pll3[] = {
+	"pxo",
+	"pll8_vote",
+	"pll3",
+};
+
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
 	{  1843200, P_PLL8, 2,  6, 625 },
 	{  3686400, P_PLL8, 2, 12, 625 },
@@ -1928,6 +1941,104 @@
 	},
 };
 
+static struct clk_rcg usb_hs3_xcvr_src = {
+	.ns_reg = 0x370c,
+	.md_reg = 0x3708,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_usb,
+	.clkr = {
+		.enable_reg = 0x370c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs3_xcvr_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch usb_hs3_xcvr_clk = {
+	.halt_reg = 0x2fc8,
+	.halt_bit = 30,
+	.clkr = {
+		.enable_reg = 0x370c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs3_xcvr_clk",
+			.parent_names = (const char *[]){ "usb_hs3_xcvr_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg usb_hs4_xcvr_src = {
+	.ns_reg = 0x372c,
+	.md_reg = 0x3728,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_map,
+	},
+	.freq_tbl = clk_tbl_usb,
+	.clkr = {
+		.enable_reg = 0x372c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs4_xcvr_src",
+			.parent_names = gcc_pxo_pll8,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	}
+};
+
+static struct clk_branch usb_hs4_xcvr_clk = {
+	.halt_reg = 0x2fc8,
+	.halt_bit = 2,
+	.clkr = {
+		.enable_reg = 0x372c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs4_xcvr_clk",
+			.parent_names = (const char *[]){ "usb_hs4_xcvr_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
 static struct clk_rcg usb_hsic_xcvr_fs_src = {
 	.ns_reg = 0x2928,
 	.md_reg = 0x2924,
@@ -2456,6 +2567,34 @@
 	},
 };
 
+static struct clk_branch usb_hs3_h_clk = {
+	.halt_reg = 0x2fc8,
+	.halt_bit = 31,
+	.clkr = {
+		.enable_reg = 0x3700,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs3_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch usb_hs4_h_clk = {
+	.halt_reg = 0x2fc8,
+	.halt_bit = 7,
+	.clkr = {
+		.enable_reg = 0x3720,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "usb_hs4_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
 static struct clk_branch usb_hsic_h_clk = {
 	.halt_reg = 0x2fcc,
 	.halt_bit = 28,
@@ -2582,6 +2721,244 @@
 	},
 };
 
+static struct freq_tbl clk_tbl_ce3[] = {
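+	/*
+	 * m/n left zero-initialized here, so the MN counter is bypassed;
+	 * assuming the usual 384 MHz pll8 and 1200 MHz pll3 rates these
+	 * rows are plain divisions: 384/8 = 48, 1200/12 = 100 and
+	 * 1200/10 = 120 MHz.
+	 */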
+	{ 48000000, P_PLL8, 8 },
+	{ 100000000, P_PLL3, 12 },
+	{ 120000000, P_PLL3, 10 },
+	{ }
+};
+
+static struct clk_rcg ce3_src = {
+	.ns_reg = 0x36c0,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll3_map,
+	},
+	.freq_tbl = clk_tbl_ce3,
+	.clkr = {
+		.enable_reg = 0x36c0,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "ce3_src",
+			.parent_names = gcc_pxo_pll8_pll3,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch ce3_core_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 5,
+	.clkr = {
+		.enable_reg = 0x36c4,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "ce3_core_clk",
+			.parent_names = (const char *[]){ "ce3_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch ce3_h_clk = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 16,
+	.clkr = {
+		.enable_reg = 0x36c4,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "ce3_h_clk",
+			.parent_names = (const char *[]){ "ce3_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl clk_tbl_sata_ref[] = {
+	{ 48000000, P_PLL8, 8, 0, 0 },
+	{ 100000000, P_PLL3, 12, 0, 0 },
+	{ }
+};
+
+static struct clk_rcg sata_clk_src = {
+	.ns_reg = 0x2c08,
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 4,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = gcc_pxo_pll8_pll3_map,
+	},
+	.freq_tbl = clk_tbl_sata_ref,
+	.clkr = {
+		.enable_reg = 0x2c08,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_clk_src",
+			.parent_names = gcc_pxo_pll8_pll3,
+			.num_parents = 3,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch sata_rxoob_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 26,
+	.clkr = {
+		.enable_reg = 0x2c0c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_rxoob_clk",
+			.parent_names = (const char *[]){ "sata_clk_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch sata_pmalive_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 25,
+	.clkr = {
+		.enable_reg = 0x2c10,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_pmalive_clk",
+			.parent_names = (const char *[]){ "sata_clk_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch sata_phy_ref_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 24,
+	.clkr = {
+		.enable_reg = 0x2c14,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_phy_ref_clk",
+			.parent_names = (const char *[]){ "pxo" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+		},
+	},
+};
+
+static struct clk_branch sata_a_clk = {
+	.halt_reg = 0x2fc0,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x2c20,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_a_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sata_h_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 27,
+	.clkr = {
+		.enable_reg = 0x2c00,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sfab_sata_s_h_clk = {
+	.halt_reg = 0x2fc4,
+	.halt_bit = 14,
+	.clkr = {
+		.enable_reg = 0x2480,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sfab_sata_s_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch sata_phy_cfg_clk = {
+	.halt_reg = 0x2fcc,
+	.halt_bit = 12,
+	.clkr = {
+		.enable_reg = 0x2c40,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "sata_phy_cfg_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie_phy_ref_clk = {
+	.halt_reg = 0x2fdc,
+	.halt_bit = 29,
+	.clkr = {
+		.enable_reg = 0x22d0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_phy_ref_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie_h_clk = {
+	.halt_reg = 0x2fd4,
+	.halt_bit = 8,
+	.clkr = {
+		.enable_reg = 0x22cc,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_h_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
+static struct clk_branch pcie_a_clk = {
+	.halt_reg = 0x2fc0,
+	.halt_bit = 13,
+	.clkr = {
+		.enable_reg = 0x22c0,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie_a_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
 static struct clk_branch pmic_arb0_h_clk = {
 	.halt_reg = 0x2fd8,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -2869,13 +3246,205 @@
 };
 
 static struct clk_regmap *gcc_apq8064_clks[] = {
+	[PLL3] = &pll3.clkr,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
+	[PLL14] = &pll14.clkr,
+	[PLL14_VOTE] = &pll14_vote,
+	[GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+	[GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+	[GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+	[GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+	[GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+	[GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+	[GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+	[GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+	[GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+	[GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+	[GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+	[GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
 	[GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
 	[GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+	[GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+	[GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+	[GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+	[GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+	[GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+	[GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+	[GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+	[GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+	[GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+	[GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+	[GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+	[GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
 	[GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
 	[GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+	[GP0_SRC] = &gp0_src.clkr,
+	[GP0_CLK] = &gp0_clk.clkr,
+	[GP1_SRC] = &gp1_src.clkr,
+	[GP1_CLK] = &gp1_clk.clkr,
+	[GP2_SRC] = &gp2_src.clkr,
+	[GP2_CLK] = &gp2_clk.clkr,
+	[PMEM_A_CLK] = &pmem_clk.clkr,
+	[PRNG_SRC] = &prng_src.clkr,
+	[PRNG_CLK] = &prng_clk.clkr,
+	[SDC1_SRC] = &sdc1_src.clkr,
+	[SDC1_CLK] = &sdc1_clk.clkr,
+	[SDC2_SRC] = &sdc2_src.clkr,
+	[SDC2_CLK] = &sdc2_clk.clkr,
+	[SDC3_SRC] = &sdc3_src.clkr,
+	[SDC3_CLK] = &sdc3_clk.clkr,
+	[SDC4_SRC] = &sdc4_src.clkr,
+	[SDC4_CLK] = &sdc4_clk.clkr,
+	[TSIF_REF_SRC] = &tsif_ref_src.clkr,
+	[TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+	[USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+	[USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+	[USB_HS3_XCVR_SRC] = &usb_hs3_xcvr_src.clkr,
+	[USB_HS3_XCVR_CLK] = &usb_hs3_xcvr_clk.clkr,
+	[USB_HS4_XCVR_SRC] = &usb_hs4_xcvr_src.clkr,
+	[USB_HS4_XCVR_CLK] = &usb_hs4_xcvr_clk.clkr,
+	[USB_HSIC_XCVR_FS_SRC] = &usb_hsic_xcvr_fs_src.clkr,
+	[USB_HSIC_XCVR_FS_CLK] = &usb_hsic_xcvr_fs_clk.clkr,
+	[USB_HSIC_SYSTEM_CLK] = &usb_hsic_system_clk.clkr,
+	[USB_HSIC_HSIC_CLK] = &usb_hsic_hsic_clk.clkr,
+	[USB_HSIC_HSIO_CAL_CLK] = &usb_hsic_hsio_cal_clk.clkr,
+	[USB_FS1_XCVR_FS_SRC] = &usb_fs1_xcvr_fs_src.clkr,
+	[USB_FS1_XCVR_FS_CLK] = &usb_fs1_xcvr_fs_clk.clkr,
+	[USB_FS1_SYSTEM_CLK] = &usb_fs1_system_clk.clkr,
+	[SATA_H_CLK] = &sata_h_clk.clkr,
+	[SATA_CLK_SRC] = &sata_clk_src.clkr,
+	[SATA_RXOOB_CLK] = &sata_rxoob_clk.clkr,
+	[SATA_PMALIVE_CLK] = &sata_pmalive_clk.clkr,
+	[SATA_PHY_REF_CLK] = &sata_phy_ref_clk.clkr,
+	[SATA_PHY_CFG_CLK] = &sata_phy_cfg_clk.clkr,
+	[SATA_A_CLK] = &sata_a_clk.clkr,
+	[SFAB_SATA_S_H_CLK] = &sfab_sata_s_h_clk.clkr,
+	[CE3_SRC] = &ce3_src.clkr,
+	[CE3_CORE_CLK] = &ce3_core_clk.clkr,
+	[CE3_H_CLK] = &ce3_h_clk.clkr,
+	[DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+	[GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+	[GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+	[GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+	[GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+	[GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+	[GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
 	[GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+	[TSIF_H_CLK] = &tsif_h_clk.clkr,
+	[USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+	[USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+	[USB_HSIC_H_CLK] = &usb_hsic_h_clk.clkr,
+	[USB_HS3_H_CLK] = &usb_hs3_h_clk.clkr,
+	[USB_HS4_H_CLK] = &usb_hs4_h_clk.clkr,
+	[SDC1_H_CLK] = &sdc1_h_clk.clkr,
+	[SDC2_H_CLK] = &sdc2_h_clk.clkr,
+	[SDC3_H_CLK] = &sdc3_h_clk.clkr,
+	[SDC4_H_CLK] = &sdc4_h_clk.clkr,
+	[ADM0_CLK] = &adm0_clk.clkr,
+	[ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+	[PCIE_A_CLK] = &pcie_a_clk.clkr,
+	[PCIE_PHY_REF_CLK] = &pcie_phy_ref_clk.clkr,
+	[PCIE_H_CLK] = &pcie_h_clk.clkr,
+	[PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+	[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+	[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+	[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_apq8064_resets[] = {
+	[QDSS_STM_RESET] = { 0x2060, 6 },
+	[AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+	[AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+	[AFAB_SMPSS_M0_RESET] = { 0x20b8 },
+	[AFAB_EBI1_CH0_RESET] = { 0x20c0, 7 },
+	[AFAB_EBI1_CH1_RESET] = { 0x20c4, 7 },
+	[SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+	[SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+	[SFAB_ADM0_M2_RESET] = { 0x21e8, 7 },
+	[ADM0_C2_RESET] = { 0x220c, 4 },
+	[ADM0_C1_RESET] = { 0x220c, 3 },
+	[ADM0_C0_RESET] = { 0x220c, 2 },
+	[ADM0_PBUS_RESET] = { 0x220c, 1 },
+	[ADM0_RESET] = { 0x220c },
+	[QDSS_CLKS_SW_RESET] = { 0x2260, 5 },
+	[QDSS_POR_RESET] = { 0x2260, 4 },
+	[QDSS_TSCTR_RESET] = { 0x2260, 3 },
+	[QDSS_HRESET_RESET] = { 0x2260, 2 },
+	[QDSS_AXI_RESET] = { 0x2260, 1 },
+	[QDSS_DBG_RESET] = { 0x2260 },
+	[SFAB_PCIE_M_RESET] = { 0x22d8, 1 },
+	[SFAB_PCIE_S_RESET] = { 0x22d8 },
+	[PCIE_EXT_PCI_RESET] = { 0x22dc, 6 },
+	[PCIE_PHY_RESET] = { 0x22dc, 5 },
+	[PCIE_PCI_RESET] = { 0x22dc, 4 },
+	[PCIE_POR_RESET] = { 0x22dc, 3 },
+	[PCIE_HCLK_RESET] = { 0x22dc, 2 },
+	[PCIE_ACLK_RESET] = { 0x22dc },
+	[SFAB_USB3_M_RESET] = { 0x2360, 7 },
+	[SFAB_RIVA_M_RESET] = { 0x2380, 7 },
+	[SFAB_LPASS_RESET] = { 0x23a0, 7 },
+	[SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+	[AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+	[AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+	[SFAB_SATA_S_RESET] = { 0x2480, 7 },
+	[SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+	[DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+	[DFAB_SWAY0_RESET] = { 0x2540, 7 },
+	[DFAB_SWAY1_RESET] = { 0x2544, 7 },
+	[DFAB_ARB0_RESET] = { 0x2560, 7 },
+	[DFAB_ARB1_RESET] = { 0x2564, 7 },
+	[PPSS_PROC_RESET] = { 0x2594, 1 },
+	[PPSS_RESET] = { 0x2594 },
+	[DMA_BAM_RESET] = { 0x25c0, 7 },
+	[SPS_TIC_H_RESET] = { 0x2600, 7 },
+	[SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+	[SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+	[TSIF_H_RESET] = { 0x2700, 7 },
+	[CE1_H_RESET] = { 0x2720, 7 },
+	[CE1_CORE_RESET] = { 0x2724, 7 },
+	[CE1_SLEEP_RESET] = { 0x2728, 7 },
+	[CE2_H_RESET] = { 0x2740, 7 },
+	[CE2_CORE_RESET] = { 0x2744, 7 },
+	[SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+	[SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+	[RPM_PROC_RESET] = { 0x27c0, 7 },
+	[PMIC_SSBI2_RESET] = { 0x280c, 12 },
+	[SDC1_RESET] = { 0x2830 },
+	[SDC2_RESET] = { 0x2850 },
+	[SDC3_RESET] = { 0x2870 },
+	[SDC4_RESET] = { 0x2890 },
+	[USB_HS1_RESET] = { 0x2910 },
+	[USB_HSIC_RESET] = { 0x2934 },
+	[USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+	[USB_FS1_RESET] = { 0x2974 },
+	[GSBI1_RESET] = { 0x29dc },
+	[GSBI2_RESET] = { 0x29fc },
+	[GSBI3_RESET] = { 0x2a1c },
+	[GSBI4_RESET] = { 0x2a3c },
+	[GSBI5_RESET] = { 0x2a5c },
+	[GSBI6_RESET] = { 0x2a7c },
+	[GSBI7_RESET] = { 0x2a9c },
+	[SPDM_RESET] = { 0x2b6c },
+	[TLMM_H_RESET] = { 0x2ba0, 7 },
+	[SATA_SFAB_M_RESET] = { 0x2c18 },
+	[SATA_RESET] = { 0x2c1c },
+	[GSS_SLP_RESET] = { 0x2c60, 7 },
+	[GSS_RESET] = { 0x2c64 },
+	[TSSC_RESET] = { 0x2ca0, 7 },
+	[PDM_RESET] = { 0x2cc0, 12 },
+	[MPM_H_RESET] = { 0x2da0, 7 },
+	[MPM_RESET] = { 0x2da4 },
+	[SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+	[PRNG_RESET] = { 0x2e80, 12 },
+	[RIVA_RESET] = { 0x35e0 },
+	[CE3_H_RESET] = { 0x36c4, 7 },
+	[SFAB_CE3_M_RESET] = { 0x36c8, 1 },
+	[SFAB_CE3_S_RESET] = { 0x36c8 },
+	[CE3_RESET] = { 0x36cc, 7 },
+	[CE3_SLEEP_RESET] = { 0x36d0, 7 },
+	[USB_HS3_RESET] = { 0x3710 },
+	[USB_HS4_RESET] = { 0x3730 },
 };
 
 static const struct regmap_config gcc_msm8960_regmap_config = {
@@ -2886,6 +3455,14 @@
 	.fast_io	= true,
 };
 
+static const struct regmap_config gcc_apq8064_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x3880,
+	.fast_io	= true,
+};
+
 static const struct qcom_cc_desc gcc_msm8960_desc = {
 	.config = &gcc_msm8960_regmap_config,
 	.clks = gcc_msm8960_clks,
@@ -2895,11 +3472,11 @@
 };
 
 static const struct qcom_cc_desc gcc_apq8064_desc = {
-	.config = &gcc_msm8960_regmap_config,
+	.config = &gcc_apq8064_regmap_config,
 	.clks = gcc_apq8064_clks,
 	.num_clks = ARRAY_SIZE(gcc_apq8064_clks),
-	.resets = gcc_msm8960_resets,
-	.num_resets = ARRAY_SIZE(gcc_msm8960_resets),
+	.resets = gcc_apq8064_resets,
+	.num_resets = ARRAY_SIZE(gcc_apq8064_resets),
 };
 
 static const struct of_device_id gcc_msm8960_match_table[] = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
new file mode 100644
index 0000000..751eea3
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -0,0 +1,3352 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-apq8084.h>
+#include <dt-bindings/reset/qcom,mmcc-apq8084.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO		0
+#define P_MMPLL0	1
+#define P_EDPLINK	1
+#define P_MMPLL1	2
+#define P_HDMIPLL	2
+#define P_GPLL0		3
+#define P_EDPVCO	3
+#define P_MMPLL4	4
+#define P_DSI0PLL	4
+#define P_DSI0PLL_BYTE	4
+#define P_MMPLL2	4
+#define P_MMPLL3	4
+#define P_GPLL1		5
+#define P_DSI1PLL	5
+#define P_DSI1PLL_BYTE	5
+#define P_MMSLEEP	6
+
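+/*
+ * Several of the P_* tokens above share an index because no single mux
+ * uses both of them.  Each *_map table below holds one mux's source-select
+ * field encodings, and the companion string array lists the corresponding
+ * parent clock names.
+ */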
+static const u8 mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_MMPLL1]	= 2,
+	[P_GPLL0]	= 5,
+};
+
+static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+	"xo",
+	"mmpll0_vote",
+	"mmpll1_vote",
+	"mmss_gpll0_vote",
+};
+
+static const u8 mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_HDMIPLL]	= 4,
+	[P_GPLL0]	= 5,
+	[P_DSI0PLL]	= 2,
+	[P_DSI1PLL]	= 3,
+};
+
+static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+	"xo",
+	"mmpll0_vote",
+	"hdmipll",
+	"mmss_gpll0_vote",
+	"dsi0pll",
+	"dsi1pll",
+};
+
+static const u8 mmcc_xo_mmpll0_1_2_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_MMPLL1]	= 2,
+	[P_GPLL0]	= 5,
+	[P_MMPLL2]	= 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+	"xo",
+	"mmpll0_vote",
+	"mmpll1_vote",
+	"mmss_gpll0_vote",
+	"mmpll2",
+};
+
+static const u8 mmcc_xo_mmpll0_1_3_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_MMPLL1]	= 2,
+	[P_GPLL0]	= 5,
+	[P_MMPLL3]	= 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+	"xo",
+	"mmpll0_vote",
+	"mmpll1_vote",
+	"mmss_gpll0_vote",
+	"mmpll3",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_map[] = {
+	[P_XO]		= 0,
+	[P_EDPLINK]	= 4,
+	[P_HDMIPLL]	= 3,
+	[P_EDPVCO]	= 5,
+	[P_DSI0PLL]	= 1,
+	[P_DSI1PLL]	= 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp[] = {
+	"xo",
+	"edp_link_clk",
+	"hdmipll",
+	"edp_vco_div",
+	"dsi0pll",
+	"dsi1pll",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_EDPLINK]	= 4,
+	[P_HDMIPLL]	= 3,
+	[P_GPLL0]	= 5,
+	[P_DSI0PLL]	= 1,
+	[P_DSI1PLL]	= 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+	"xo",
+	"edp_link_clk",
+	"hdmipll",
+	"gpll0_vote",
+	"dsi0pll",
+	"dsi1pll",
+};
+
+static const u8 mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
+	[P_XO]			= 0,
+	[P_EDPLINK]		= 4,
+	[P_HDMIPLL]		= 3,
+	[P_GPLL0]		= 5,
+	[P_DSI0PLL_BYTE]	= 1,
+	[P_DSI1PLL_BYTE]	= 2,
+};
+
+static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+	"xo",
+	"edp_link_clk",
+	"hdmipll",
+	"gpll0_vote",
+	"dsi0pllbyte",
+	"dsi1pllbyte",
+};
+
+static const u8 mmcc_xo_mmpll0_1_4_gpll0_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_MMPLL1]	= 2,
+	[P_GPLL0]	= 5,
+	[P_MMPLL4]	= 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_4_gpll0[] = {
+	"xo",
+	"mmpll0",
+	"mmpll1",
+	"mmpll4",
+	"gpll0",
+};
+
+static const u8 mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_MMPLL1]	= 2,
+	[P_MMPLL4]	= 3,
+	[P_GPLL0]	= 5,
+	[P_GPLL1]	= 4,
+};
+
+static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = {
+	"xo",
+	"mmpll0",
+	"mmpll1",
+	"mmpll4",
+	"gpll1",
+	"gpll0",
+};
+
+static const u8 mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
+	[P_XO]		= 0,
+	[P_MMPLL0]	= 1,
+	[P_MMPLL1]	= 2,
+	[P_MMPLL4]	= 3,
+	[P_GPLL0]	= 5,
+	[P_GPLL1]	= 4,
+	[P_MMSLEEP]	= 6,
+};
+
+static const char *mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
+	"xo",
+	"mmpll0",
+	"mmpll1",
+	"mmpll4",
+	"gpll1",
+	"gpll0",
+	"sleep_clk_src",
+};
+
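+/*
+ * The pre-divider argument h is stored as 2*h - 1, which lets the tables
+ * below use exact half-integer dividers: 3.5 encodes as 6, 2.5 as 4,
+ * and 1 as 1.
+ */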
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
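+/*
+ * The MM PLLs run from the 19.2 MHz xo; the clk-pll code derives their
+ * rate from the L/M/N registers as parent * L + parent * M / N.
+ */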
+static struct clk_pll mmpll0 = {
+	.l_reg = 0x0004,
+	.m_reg = 0x0008,
+	.n_reg = 0x000c,
+	.config_reg = 0x0014,
+	.mode_reg = 0x0000,
+	.status_reg = 0x001c,
+	.status_bit = 17,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll0",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_regmap mmpll0_vote = {
+	.enable_reg = 0x0100,
+	.enable_mask = BIT(0),
+	.hw.init = &(struct clk_init_data){
+		.name = "mmpll0_vote",
+		.parent_names = (const char *[]){ "mmpll0" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+static struct clk_pll mmpll1 = {
+	.l_reg = 0x0044,
+	.m_reg = 0x0048,
+	.n_reg = 0x004c,
+	.config_reg = 0x0050,
+	.mode_reg = 0x0040,
+	.status_reg = 0x005c,
+	.status_bit = 17,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll1",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_regmap mmpll1_vote = {
+	.enable_reg = 0x0100,
+	.enable_mask = BIT(1),
+	.hw.init = &(struct clk_init_data){
+		.name = "mmpll1_vote",
+		.parent_names = (const char *[]){ "mmpll1" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
+static struct clk_pll mmpll2 = {
+	.l_reg = 0x4104,
+	.m_reg = 0x4108,
+	.n_reg = 0x410c,
+	.config_reg = 0x4110,
+	.mode_reg = 0x4100,
+	.status_reg = 0x411c,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll2",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_pll mmpll3 = {
+	.l_reg = 0x0084,
+	.m_reg = 0x0088,
+	.n_reg = 0x008c,
+	.config_reg = 0x0090,
+	.mode_reg = 0x0080,
+	.status_reg = 0x009c,
+	.status_bit = 17,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll3",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_pll mmpll4 = {
+	.l_reg = 0x00a4,
+	.m_reg = 0x00a8,
+	.n_reg = 0x00ac,
+	.config_reg = 0x00b0,
+	.mode_reg = 0x00a0,
+	.status_reg = 0x00bc,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll4",
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static struct clk_rcg2 mmss_ahb_clk_src = {
+	.cmd_rcgr = 0x5000,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmss_ahb_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_mmss_axi_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(37500000, P_GPLL0, 16, 0, 0),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(75000000, P_GPLL0, 8, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(150000000, P_GPLL0, 4, 0, 0),
+	F(333430000, P_MMPLL1, 3.5, 0, 0),
+	F(400000000, P_MMPLL0, 2, 0, 0),
+	F(466800000, P_MMPLL1, 2.5, 0, 0),
+};
+
+static struct clk_rcg2 mmss_axi_clk_src = {
+	.cmd_rcgr = 0x5040,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_mmss_axi_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmss_axi_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(37500000, P_GPLL0, 16, 0, 0),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(75000000, P_GPLL0, 8, 0, 0),
+	F(109090000, P_GPLL0, 5.5, 0, 0),
+	F(150000000, P_GPLL0, 4, 0, 0),
+	F(228570000, P_MMPLL0, 3.5, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ocmemnoc_clk_src = {
+	.cmd_rcgr = 0x5090,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_ocmemnoc_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ocmemnoc_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_MMPLL0, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+	.cmd_rcgr = 0x3090,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_csi0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi0_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+	.cmd_rcgr = 0x3100,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_csi0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi1_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+	.cmd_rcgr = 0x3160,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_csi0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi2_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+	.cmd_rcgr = 0x31c0,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_csi0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi3_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+	F(37500000, P_GPLL0, 16, 0, 0),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(60000000, P_GPLL0, 10, 0, 0),
+	F(80000000, P_GPLL0, 7.5, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(109090000, P_GPLL0, 5.5, 0, 0),
+	F(133330000, P_GPLL0, 4.5, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(228570000, P_MMPLL0, 3.5, 0, 0),
+	F(266670000, P_MMPLL0, 3, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	F(465000000, P_MMPLL4, 2, 0, 0),
+	F(600000000, P_GPLL0, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+	.cmd_rcgr = 0x3600,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vfe0_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+	.cmd_rcgr = 0x3620,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vfe1_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+	F(37500000, P_GPLL0, 16, 0, 0),
+	F(60000000, P_GPLL0, 10, 0, 0),
+	F(75000000, P_GPLL0, 8, 0, 0),
+	F(85710000, P_GPLL0, 7, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(150000000, P_GPLL0, 4, 0, 0),
+	F(160000000, P_MMPLL0, 5, 0, 0),
+	F(200000000, P_MMPLL0, 4, 0, 0),
+	F(228570000, P_MMPLL0, 3.5, 0, 0),
+	F(300000000, P_GPLL0, 2, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+	.cmd_rcgr = 0x2040,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_dsi_hdmi_gpll0_map,
+	.freq_tbl = ftbl_mdss_mdp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mdp_clk_src",
+		.parent_names = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+	.cmd_rcgr = 0x4000,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gfx3d_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_2_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+	F(75000000, P_GPLL0, 8, 0, 0),
+	F(133330000, P_GPLL0, 4.5, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(228570000, P_MMPLL0, 3.5, 0, 0),
+	F(266670000, P_MMPLL0, 3, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+	.cmd_rcgr = 0x3500,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "jpeg0_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 jpeg1_clk_src = {
+	.cmd_rcgr = 0x3520,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "jpeg1_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 jpeg2_clk_src = {
+	.cmd_rcgr = 0x3540,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "jpeg2_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
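+/*
+ * Display clocks that track a display PHY PLL use a table with only a .src
+ * entry: the mux source is pinned while clk_pixel_ops/clk_byte_ops derive
+ * the actual rate (and any M/N setting) from the PLL parent, hence the
+ * CLK_SET_RATE_PARENT flag on these clocks.
+ */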
+static struct freq_tbl pixel_freq_tbl[] = {
+	{ .src = P_DSI0PLL },
+	{ }
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+	.cmd_rcgr = 0x2000,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+	.freq_tbl = pixel_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pclk0_clk_src",
+		.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_pixel_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+	.cmd_rcgr = 0x2020,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+	.freq_tbl = pixel_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pclk1_clk_src",
+		.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_pixel_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(133330000, P_GPLL0, 4.5, 0, 0),
+	F(200000000, P_MMPLL0, 4, 0, 0),
+	F(266670000, P_MMPLL0, 3, 0, 0),
+	F(465000000, P_MMPLL3, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+	.cmd_rcgr = 0x1000,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_3_gpll0_map,
+	.freq_tbl = ftbl_venus0_vcodec0_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vcodec0_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_3_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_avsync_vp_clk[] = {
+	F(150000000, P_GPLL0, 4, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vp_clk_src = {
+	.cmd_rcgr = 0x2430,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_avsync_vp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vp_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+	.cmd_rcgr = 0x3300,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+	.freq_tbl = ftbl_camss_cci_cci_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cci_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
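+/*
+ * The general-purpose clocks use the M/N counter for large divisions,
+ * e.g. F(10000, P_XO, 16, 1, 120): 19.2 MHz / 16 * (1/120) = 10 kHz.
+ */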
+static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+	F(10000, P_XO, 16, 1, 120),
+	F(24000, P_XO, 16, 1, 50),
+	F(6000000, P_GPLL0, 10, 1, 10),
+	F(12000000, P_GPLL0, 10, 1, 5),
+	F(13000000, P_GPLL0, 4, 13, 150),
+	F(24000000, P_GPLL0, 5, 1, 5),
+	{ }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+	.cmd_rcgr = 0x3420,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map,
+	.freq_tbl = ftbl_camss_gp0_1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "camss_gp0_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+	.cmd_rcgr = 0x3450,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map,
+	.freq_tbl = ftbl_camss_gp0_1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "camss_gp1_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+	F(4800000, P_XO, 4, 0, 0),
+	F(6000000, P_GPLL0, 10, 1, 10),
+	F(8000000, P_GPLL0, 15, 1, 5),
+	F(9600000, P_XO, 2, 0, 0),
+	F(16000000, P_MMPLL0, 10, 1, 5),
+	F(19200000, P_XO, 1, 0, 0),
+	F(24000000, P_GPLL0, 5, 1, 5),
+	F(32000000, P_MMPLL0, 5, 1, 5),
+	F(48000000, P_GPLL0, 12.5, 0, 0),
+	F(64000000, P_MMPLL0, 12.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+	.cmd_rcgr = 0x3360,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+	.freq_tbl = ftbl_camss_mclk0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk0_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+	.cmd_rcgr = 0x3390,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+	.freq_tbl = ftbl_camss_mclk0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk1_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+	.cmd_rcgr = 0x33c0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+	.freq_tbl = ftbl_camss_mclk0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk2_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+	.cmd_rcgr = 0x33f0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+	.freq_tbl = ftbl_camss_mclk0_3_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk3_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_MMPLL0, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x3000,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi0phytimer_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x3030,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi1phytimer_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x3060,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi2phytimer_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+	F(133330000, P_GPLL0, 4.5, 0, 0),
+	F(266670000, P_MMPLL0, 3, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	F(372000000, P_MMPLL4, 2.5, 0, 0),
+	F(465000000, P_MMPLL4, 2, 0, 0),
+	F(600000000, P_GPLL0, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+	.cmd_rcgr = 0x3640,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+	.freq_tbl = ftbl_camss_vfe_cpp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cpp_clk_src",
+		.parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl byte_freq_tbl[] = {
+	{ .src = P_DSI0PLL_BYTE },
+	{ }
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+	.cmd_rcgr = 0x2120,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+	.freq_tbl = byte_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "byte0_clk_src",
+		.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_byte_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+	.cmd_rcgr = 0x2140,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+	.freq_tbl = byte_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "byte1_clk_src",
+		.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_byte_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 edpaux_clk_src = {
+	.cmd_rcgr = 0x20e0,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_mdss_edpaux_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "edpaux_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+	F(135000000, P_EDPLINK, 2, 0, 0),
+	F(270000000, P_EDPLINK, 11, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 edplink_clk_src = {
+	.cmd_rcgr = 0x20c0,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+	.freq_tbl = ftbl_mdss_edplink_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "edplink_clk_src",
+		.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct freq_tbl edp_pixel_freq_tbl[] = {
+	{ .src = P_EDPVCO },
+	{ }
+};
+
+static struct clk_rcg2 edppixel_clk_src = {
+	.cmd_rcgr = 0x20a0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsi_hdmi_edp_map,
+	.freq_tbl = edp_pixel_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "edppixel_clk_src",
+		.parent_names = mmcc_xo_dsi_hdmi_edp,
+		.num_parents = 6,
+		.ops = &clk_edp_pixel_ops,
+	},
+};
+
+static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+	.cmd_rcgr = 0x2160,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+	.freq_tbl = ftbl_mdss_esc0_1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "esc0_clk_src",
+		.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+	.cmd_rcgr = 0x2180,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+	.freq_tbl = ftbl_mdss_esc0_1_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "esc1_clk_src",
+		.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl extpclk_freq_tbl[] = {
+	{ .src = P_HDMIPLL },
+	{ }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+	.cmd_rcgr = 0x2060,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+	.freq_tbl = extpclk_freq_tbl,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "extpclk_clk_src",
+		.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+		.num_parents = 6,
+		.ops = &clk_byte_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+	.cmd_rcgr = 0x2100,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_mdss_hdmi_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "hdmi_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+	.cmd_rcgr = 0x2080,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_mdss_vsync_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vsync_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_mmss_rbcpr_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+	.cmd_rcgr = 0x4060,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_mmss_rbcpr_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "rbcpr_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+	.cmd_rcgr = 0x4090,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_oxili_rbbmtimer_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "rbbmtimer_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_vpu_maple_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(133330000, P_GPLL0, 4.5, 0, 0),
+	F(200000000, P_MMPLL0, 4, 0, 0),
+	F(266670000, P_MMPLL0, 3, 0, 0),
+	F(465000000, P_MMPLL3, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 maple_clk_src = {
+	.cmd_rcgr = 0x1320,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_vpu_maple_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "maple_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_vpu_vdp_clk[] = {
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_MMPLL0, 4, 0, 0),
+	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	F(400000000, P_MMPLL0, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vdp_clk_src = {
+	.cmd_rcgr = 0x1300,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_vpu_vdp_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vdp_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct freq_tbl ftbl_vpu_bus_clk[] = {
+	F(40000000, P_GPLL0, 15, 0, 0),
+	F(80000000, P_MMPLL0, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vpu_bus_clk_src = {
+	.cmd_rcgr = 0x1340,
+	.hid_width = 5,
+	.parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+	.freq_tbl = ftbl_vpu_bus_clk,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vpu_bus_clk_src",
+		.parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
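+/*
+ * Branch clocks: enable_mask is the enable bit in the branch's CBCR
+ * register, and halt_reg (here the same CBCR) is polled by clk_branch2_ops
+ * to confirm the branch actually turned on or off.  Branches fed by an RCG
+ * set CLK_SET_RATE_PARENT so rate requests propagate up to the RCG.
+ */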
+static struct clk_branch mmss_cxo_clk = {
+	.halt_reg = 0x5104,
+	.clkr = {
+		.enable_reg = 0x5104,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_cxo_clk",
+			.parent_names = (const char *[]){ "xo" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_sleepclk_clk = {
+	.halt_reg = 0x5100,
+	.clkr = {
+		.enable_reg = 0x5100,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_sleepclk_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch avsync_ahb_clk = {
+	.halt_reg = 0x2414,
+	.clkr = {
+		.enable_reg = 0x2414,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "avsync_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch avsync_edppixel_clk = {
+	.halt_reg = 0x2418,
+	.clkr = {
+		.enable_reg = 0x2418,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "avsync_edppixel_clk",
+			.parent_names = (const char *[]){
+				"edppixel_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch avsync_extpclk_clk = {
+	.halt_reg = 0x2410,
+	.clkr = {
+		.enable_reg = 0x2410,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "avsync_extpclk_clk",
+			.parent_names = (const char *[]){
+				"extpclk_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch avsync_pclk0_clk = {
+	.halt_reg = 0x241c,
+	.clkr = {
+		.enable_reg = 0x241c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "avsync_pclk0_clk",
+			.parent_names = (const char *[]){
+				"pclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch avsync_pclk1_clk = {
+	.halt_reg = 0x2420,
+	.clkr = {
+		.enable_reg = 0x2420,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "avsync_pclk1_clk",
+			.parent_names = (const char *[]){
+				"pclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch avsync_vp_clk = {
+	.halt_reg = 0x2404,
+	.clkr = {
+		.enable_reg = 0x2404,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "avsync_vp_clk",
+			.parent_names = (const char *[]){
+				"vp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_ahb_clk = {
+	.halt_reg = 0x348c,
+	.clkr = {
+		.enable_reg = 0x348c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_cci_cci_ahb_clk = {
+	.halt_reg = 0x3348,
+	.clkr = {
+		.enable_reg = 0x3348,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cci_cci_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_cci_cci_clk = {
+	.halt_reg = 0x3344,
+	.clkr = {
+		.enable_reg = 0x3344,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cci_cci_clk",
+			.parent_names = (const char *[]){
+				"cci_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+	.halt_reg = 0x30bc,
+	.clkr = {
+		.enable_reg = 0x30bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0_clk = {
+	.halt_reg = 0x30b4,
+	.clkr = {
+		.enable_reg = 0x30b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0_clk",
+			.parent_names = (const char *[]){
+				"csi0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0phy_clk = {
+	.halt_reg = 0x30c4,
+	.clkr = {
+		.enable_reg = 0x30c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0phy_clk",
+			.parent_names = (const char *[]){
+				"csi0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+	.halt_reg = 0x30e4,
+	.clkr = {
+		.enable_reg = 0x30e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0pix_clk",
+			.parent_names = (const char *[]){
+				"csi0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+	.halt_reg = 0x30d4,
+	.clkr = {
+		.enable_reg = 0x30d4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0rdi_clk",
+			.parent_names = (const char *[]){
+				"csi0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+	.halt_reg = 0x3128,
+	.clkr = {
+		.enable_reg = 0x3128,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1_clk = {
+	.halt_reg = 0x3124,
+	.clkr = {
+		.enable_reg = 0x3124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1_clk",
+			.parent_names = (const char *[]){
+				"csi1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1phy_clk = {
+	.halt_reg = 0x3134,
+	.clkr = {
+		.enable_reg = 0x3134,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1phy_clk",
+			.parent_names = (const char *[]){
+				"csi1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+	.halt_reg = 0x3154,
+	.clkr = {
+		.enable_reg = 0x3154,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1pix_clk",
+			.parent_names = (const char *[]){
+				"csi1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+	.halt_reg = 0x3144,
+	.clkr = {
+		.enable_reg = 0x3144,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1rdi_clk",
+			.parent_names = (const char *[]){
+				"csi1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+	.halt_reg = 0x3188,
+	.clkr = {
+		.enable_reg = 0x3188,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2_clk = {
+	.halt_reg = 0x3184,
+	.clkr = {
+		.enable_reg = 0x3184,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2_clk",
+			.parent_names = (const char *[]){
+				"csi2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2phy_clk = {
+	.halt_reg = 0x3194,
+	.clkr = {
+		.enable_reg = 0x3194,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2phy_clk",
+			.parent_names = (const char *[]){
+				"csi2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+	.halt_reg = 0x31b4,
+	.clkr = {
+		.enable_reg = 0x31b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2pix_clk",
+			.parent_names = (const char *[]){
+				"csi2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+	.halt_reg = 0x31a4,
+	.clkr = {
+		.enable_reg = 0x31a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2rdi_clk",
+			.parent_names = (const char *[]){
+				"csi2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+	.halt_reg = 0x31e8,
+	.clkr = {
+		.enable_reg = 0x31e8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3_clk = {
+	.halt_reg = 0x31e4,
+	.clkr = {
+		.enable_reg = 0x31e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3_clk",
+			.parent_names = (const char *[]){
+				"csi3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3phy_clk = {
+	.halt_reg = 0x31f4,
+	.clkr = {
+		.enable_reg = 0x31f4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3phy_clk",
+			.parent_names = (const char *[]){
+				"csi3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+	.halt_reg = 0x3214,
+	.clkr = {
+		.enable_reg = 0x3214,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3pix_clk",
+			.parent_names = (const char *[]){
+				"csi3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+	.halt_reg = 0x3204,
+	.clkr = {
+		.enable_reg = 0x3204,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3rdi_clk",
+			.parent_names = (const char *[]){
+				"csi3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+	.halt_reg = 0x3704,
+	.clkr = {
+		.enable_reg = 0x3704,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi_vfe0_clk",
+			.parent_names = (const char *[]){
+				"vfe0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+	.halt_reg = 0x3714,
+	.clkr = {
+		.enable_reg = 0x3714,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi_vfe1_clk",
+			.parent_names = (const char *[]){
+				"vfe1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_gp0_clk = {
+	.halt_reg = 0x3444,
+	.clkr = {
+		.enable_reg = 0x3444,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_gp0_clk",
+			.parent_names = (const char *[]){
+				"camss_gp0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_gp1_clk = {
+	.halt_reg = 0x3474,
+	.clkr = {
+		.enable_reg = 0x3474,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_gp1_clk",
+			.parent_names = (const char *[]){
+				"camss_gp1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+	.halt_reg = 0x3224,
+	.clkr = {
+		.enable_reg = 0x3224,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_ispif_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_jpeg0_clk = {
+	.halt_reg = 0x35a8,
+	.clkr = {
+		.enable_reg = 0x35a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_jpeg0_clk",
+			.parent_names = (const char *[]){
+				"jpeg0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_jpeg1_clk = {
+	.halt_reg = 0x35ac,
+	.clkr = {
+		.enable_reg = 0x35ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_jpeg1_clk",
+			.parent_names = (const char *[]){
+				"jpeg1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_jpeg2_clk = {
+	.halt_reg = 0x35b0,
+	.clkr = {
+		.enable_reg = 0x35b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_jpeg2_clk",
+			.parent_names = (const char *[]){
+				"jpeg2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_jpeg_ahb_clk = {
+	.halt_reg = 0x35b4,
+	.clkr = {
+		.enable_reg = 0x35b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_jpeg_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_jpeg_axi_clk = {
+	.halt_reg = 0x35b8,
+	.clkr = {
+		.enable_reg = 0x35b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_jpeg_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk0_clk = {
+	.halt_reg = 0x3384,
+	.clkr = {
+		.enable_reg = 0x3384,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk0_clk",
+			.parent_names = (const char *[]){
+				"mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk1_clk = {
+	.halt_reg = 0x33b4,
+	.clkr = {
+		.enable_reg = 0x33b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk1_clk",
+			.parent_names = (const char *[]){
+				"mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk2_clk = {
+	.halt_reg = 0x33e4,
+	.clkr = {
+		.enable_reg = 0x33e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk2_clk",
+			.parent_names = (const char *[]){
+				"mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk3_clk = {
+	.halt_reg = 0x3414,
+	.clkr = {
+		.enable_reg = 0x3414,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk3_clk",
+			.parent_names = (const char *[]){
+				"mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+	.halt_reg = 0x3494,
+	.clkr = {
+		.enable_reg = 0x3494,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_micro_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_phy0_csi0phytimer_clk = {
+	.halt_reg = 0x3024,
+	.clkr = {
+		.enable_reg = 0x3024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_phy0_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_phy1_csi1phytimer_clk = {
+	.halt_reg = 0x3054,
+	.clkr = {
+		.enable_reg = 0x3054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_phy1_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_phy2_csi2phytimer_clk = {
+	.halt_reg = 0x3084,
+	.clkr = {
+		.enable_reg = 0x3084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_phy2_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+	.halt_reg = 0x3484,
+	.clkr = {
+		.enable_reg = 0x3484,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_top_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_cpp_ahb_clk = {
+	.halt_reg = 0x36b4,
+	.clkr = {
+		.enable_reg = 0x36b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_cpp_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_cpp_clk = {
+	.halt_reg = 0x36b0,
+	.clkr = {
+		.enable_reg = 0x36b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_cpp_clk",
+			.parent_names = (const char *[]){
+				"cpp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_vfe0_clk = {
+	.halt_reg = 0x36a8,
+	.clkr = {
+		.enable_reg = 0x36a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_vfe0_clk",
+			.parent_names = (const char *[]){
+				"vfe0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_vfe1_clk = {
+	.halt_reg = 0x36ac,
+	.clkr = {
+		.enable_reg = 0x36ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_vfe1_clk",
+			.parent_names = (const char *[]){
+				"vfe1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_vfe_ahb_clk = {
+	.halt_reg = 0x36b8,
+	.clkr = {
+		.enable_reg = 0x36b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_vfe_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_vfe_axi_clk = {
+	.halt_reg = 0x36bc,
+	.clkr = {
+		.enable_reg = 0x36bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_vfe_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_ahb_clk = {
+	.halt_reg = 0x2308,
+	.clkr = {
+		.enable_reg = 0x2308,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_axi_clk = {
+	.halt_reg = 0x2310,
+	.clkr = {
+		.enable_reg = 0x2310,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_byte0_clk = {
+	.halt_reg = 0x233c,
+	.clkr = {
+		.enable_reg = 0x233c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_byte0_clk",
+			.parent_names = (const char *[]){
+				"byte0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_byte1_clk = {
+	.halt_reg = 0x2340,
+	.clkr = {
+		.enable_reg = 0x2340,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_byte1_clk",
+			.parent_names = (const char *[]){
+				"byte1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_edpaux_clk = {
+	.halt_reg = 0x2334,
+	.clkr = {
+		.enable_reg = 0x2334,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_edpaux_clk",
+			.parent_names = (const char *[]){
+				"edpaux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_edplink_clk = {
+	.halt_reg = 0x2330,
+	.clkr = {
+		.enable_reg = 0x2330,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_edplink_clk",
+			.parent_names = (const char *[]){
+				"edplink_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_edppixel_clk = {
+	.halt_reg = 0x232c,
+	.clkr = {
+		.enable_reg = 0x232c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_edppixel_clk",
+			.parent_names = (const char *[]){
+				"edppixel_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_esc0_clk = {
+	.halt_reg = 0x2344,
+	.clkr = {
+		.enable_reg = 0x2344,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_esc0_clk",
+			.parent_names = (const char *[]){
+				"esc0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_esc1_clk = {
+	.halt_reg = 0x2348,
+	.clkr = {
+		.enable_reg = 0x2348,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_esc1_clk",
+			.parent_names = (const char *[]){
+				"esc1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+	.halt_reg = 0x2324,
+	.clkr = {
+		.enable_reg = 0x2324,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_extpclk_clk",
+			.parent_names = (const char *[]){
+				"extpclk_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_hdmi_ahb_clk = {
+	.halt_reg = 0x230c,
+	.clkr = {
+		.enable_reg = 0x230c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_hdmi_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+	.halt_reg = 0x2338,
+	.clkr = {
+		.enable_reg = 0x2338,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_hdmi_clk",
+			.parent_names = (const char *[]){
+				"hdmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_mdp_clk = {
+	.halt_reg = 0x231c,
+	.clkr = {
+		.enable_reg = 0x231c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_mdp_clk",
+			.parent_names = (const char *[]){
+				"mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+	.halt_reg = 0x2320,
+	.clkr = {
+		.enable_reg = 0x2320,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_mdp_lut_clk",
+			.parent_names = (const char *[]){
+				"mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+	.halt_reg = 0x2314,
+	.clkr = {
+		.enable_reg = 0x2314,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_pclk0_clk",
+			.parent_names = (const char *[]){
+				"pclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+	.halt_reg = 0x2318,
+	.clkr = {
+		.enable_reg = 0x2318,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_pclk1_clk",
+			.parent_names = (const char *[]){
+				"pclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_vsync_clk = {
+	.halt_reg = 0x2328,
+	.clkr = {
+		.enable_reg = 0x2328,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_vsync_clk",
+			.parent_names = (const char *[]){
+				"vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_rbcpr_ahb_clk = {
+	.halt_reg = 0x4088,
+	.clkr = {
+		.enable_reg = 0x4088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_rbcpr_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_rbcpr_clk = {
+	.halt_reg = 0x4084,
+	.clkr = {
+		.enable_reg = 0x4084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_rbcpr_clk",
+			.parent_names = (const char *[]){
+				"rbcpr_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_ahb_clk = {
+	.halt_reg = 0x0230,
+	.clkr = {
+		.enable_reg = 0x0230,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_ahb_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_axi_clk = {
+	.halt_reg = 0x0210,
+	.clkr = {
+		.enable_reg = 0x0210,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_axi_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_csi0_clk = {
+	.halt_reg = 0x023c,
+	.clkr = {
+		.enable_reg = 0x023c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_csi0_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_csi0_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_gfx3d_clk = {
+	.halt_reg = 0x022c,
+	.clkr = {
+		.enable_reg = 0x022c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_gfx3d_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_jpeg0_clk = {
+	.halt_reg = 0x0204,
+	.clkr = {
+		.enable_reg = 0x0204,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_jpeg0_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_jpeg0_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_jpeg1_clk = {
+	.halt_reg = 0x0208,
+	.clkr = {
+		.enable_reg = 0x0208,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_jpeg1_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_jpeg1_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_jpeg2_clk = {
+	.halt_reg = 0x0224,
+	.clkr = {
+		.enable_reg = 0x0224,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_jpeg2_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_jpeg2_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_mdp_clk = {
+	.halt_reg = 0x020c,
+	.clkr = {
+		.enable_reg = 0x020c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_mdp_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_mdp_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_pclk0_clk = {
+	.halt_reg = 0x0234,
+	.clkr = {
+		.enable_reg = 0x0234,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_pclk0_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_pclk0_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_pclk1_clk = {
+	.halt_reg = 0x0228,
+	.clkr = {
+		.enable_reg = 0x0228,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_pclk1_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_pclk1_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_vcodec0_clk = {
+	.halt_reg = 0x0214,
+	.clkr = {
+		.enable_reg = 0x0214,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_vcodec0_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_vcodec0_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_vfe0_clk = {
+	.halt_reg = 0x0218,
+	.clkr = {
+		.enable_reg = 0x0218,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_vfe0_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_vfe0_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_vfe1_clk = {
+	.halt_reg = 0x021c,
+	.clkr = {
+		.enable_reg = 0x021c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_vfe1_clk",
+			.parent_names = (const char *[]){
+				"mmss_spdm_vfe1_div_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_rm_axi_clk = {
+	.halt_reg = 0x0304,
+	.clkr = {
+		.enable_reg = 0x0304,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_rm_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_spdm_rm_ocmemnoc_clk = {
+	.halt_reg = 0x0308,
+	.clkr = {
+		.enable_reg = 0x0308,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_spdm_rm_ocmemnoc_clk",
+			.parent_names = (const char *[]){
+				"ocmemnoc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_misc_ahb_clk = {
+	.halt_reg = 0x502c,
+	.clkr = {
+		.enable_reg = 0x502c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_misc_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
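+/*
+ * The MMSS NoC bus clocks are marked CLK_IGNORE_UNUSED so the clock
+ * framework's late-boot unused-clock cleanup does not gate the multimedia
+ * fabric before the drivers that depend on it have claimed these clocks.
+ */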
+static struct clk_branch mmss_mmssnoc_ahb_clk = {
+	.halt_reg = 0x5024,
+	.clkr = {
+		.enable_reg = 0x5024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_mmssnoc_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_mmssnoc_bto_ahb_clk = {
+	.halt_reg = 0x5028,
+	.clkr = {
+		.enable_reg = 0x5028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_mmssnoc_bto_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_mmssnoc_axi_clk = {
+	.halt_reg = 0x506c,
+	.clkr = {
+		.enable_reg = 0x506c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_mmssnoc_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mmss_s0_axi_clk = {
+	.halt_reg = 0x5064,
+	.clkr = {
+		.enable_reg = 0x5064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmss_s0_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch ocmemcx_ahb_clk = {
+	.halt_reg = 0x405c,
+	.clkr = {
+		.enable_reg = 0x405c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "ocmemcx_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch ocmemcx_ocmemnoc_clk = {
+	.halt_reg = 0x4058,
+	.clkr = {
+		.enable_reg = 0x4058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "ocmemcx_ocmemnoc_clk",
+			.parent_names = (const char *[]){
+				"ocmemnoc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch oxili_ocmemgx_clk = {
+	.halt_reg = 0x402c,
+	.clkr = {
+		.enable_reg = 0x402c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "oxili_ocmemgx_clk",
+			.parent_names = (const char *[]){
+				"gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch oxili_gfx3d_clk = {
+	.halt_reg = 0x4028,
+	.clkr = {
+		.enable_reg = 0x4028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "oxili_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch oxili_rbbmtimer_clk = {
+	.halt_reg = 0x40b0,
+	.clkr = {
+		.enable_reg = 0x40b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "oxili_rbbmtimer_clk",
+			.parent_names = (const char *[]){
+				"rbbmtimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch oxilicx_ahb_clk = {
+	.halt_reg = 0x403c,
+	.clkr = {
+		.enable_reg = 0x403c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "oxilicx_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch venus0_ahb_clk = {
+	.halt_reg = 0x1030,
+	.clkr = {
+		.enable_reg = 0x1030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "venus0_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch venus0_axi_clk = {
+	.halt_reg = 0x1034,
+	.clkr = {
+		.enable_reg = 0x1034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "venus0_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch venus0_core0_vcodec_clk = {
+	.halt_reg = 0x1048,
+	.clkr = {
+		.enable_reg = 0x1048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "venus0_core0_vcodec_clk",
+			.parent_names = (const char *[]){
+				"vcodec0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch venus0_core1_vcodec_clk = {
+	.halt_reg = 0x104c,
+	.clkr = {
+		.enable_reg = 0x104c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "venus0_core1_vcodec_clk",
+			.parent_names = (const char *[]){
+				"vcodec0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch venus0_ocmemnoc_clk = {
+	.halt_reg = 0x1038,
+	.clkr = {
+		.enable_reg = 0x1038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "venus0_ocmemnoc_clk",
+			.parent_names = (const char *[]){
+				"ocmemnoc_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch venus0_vcodec0_clk = {
+	.halt_reg = 0x1028,
+	.clkr = {
+		.enable_reg = 0x1028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "venus0_vcodec0_clk",
+			.parent_names = (const char *[]){
+				"vcodec0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_ahb_clk = {
+	.halt_reg = 0x1430,
+	.clkr = {
+		.enable_reg = 0x1430,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_ahb_clk",
+			.parent_names = (const char *[]){
+				"mmss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_axi_clk = {
+	.halt_reg = 0x143c,
+	.clkr = {
+		.enable_reg = 0x143c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_axi_clk",
+			.parent_names = (const char *[]){
+				"mmss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_bus_clk = {
+	.halt_reg = 0x1440,
+	.clkr = {
+		.enable_reg = 0x1440,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_bus_clk",
+			.parent_names = (const char *[]){
+				"vpu_bus_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_cxo_clk = {
+	.halt_reg = 0x1434,
+	.clkr = {
+		.enable_reg = 0x1434,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_cxo_clk",
+			.parent_names = (const char *[]){ "xo" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_maple_clk = {
+	.halt_reg = 0x142c,
+	.clkr = {
+		.enable_reg = 0x142c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_maple_clk",
+			.parent_names = (const char *[]){
+				"maple_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_sleep_clk = {
+	.halt_reg = 0x1438,
+	.clkr = {
+		.enable_reg = 0x1438,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_sleep_clk",
+			.parent_names = (const char *[]){
+				"sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch vpu_vdp_clk = {
+	.halt_reg = 0x1428,
+	.clkr = {
+		.enable_reg = 0x1428,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vpu_vdp_clk",
+			.parent_names = (const char *[]){
+				"vdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct pll_config mmpll1_config = {
+	.l = 60,
+	.m = 25,
+	.n = 32,
+	.vco_val = 0x0,
+	.vco_mask = 0x3 << 20,
+	.pre_div_val = 0x0,
+	.pre_div_mask = 0x7 << 12,
+	.post_div_val = 0x0,
+	.post_div_mask = 0x3 << 8,
+	.mn_ena_mask = BIT(24),
+	.main_output_mask = BIT(0),
+};
+
+static const struct pll_config mmpll3_config = {
+	.l = 48,
+	.m = 7,
+	.n = 16,
+	.vco_val = 0x0,
+	.vco_mask = 0x3 << 20,
+	.pre_div_val = 0x0,
+	.pre_div_mask = 0x7 << 12,
+	.post_div_val = 0x0,
+	.post_div_mask = 0x3 << 8,
+	.mn_ena_mask = BIT(24),
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+};
+
+static struct clk_regmap *mmcc_apq8084_clocks[] = {
+	[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
+	[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
+	[MMPLL0] = &mmpll0.clkr,
+	[MMPLL0_VOTE] = &mmpll0_vote,
+	[MMPLL1] = &mmpll1.clkr,
+	[MMPLL1_VOTE] = &mmpll1_vote,
+	[MMPLL2] = &mmpll2.clkr,
+	[MMPLL3] = &mmpll3.clkr,
+	[MMPLL4] = &mmpll4.clkr,
+	[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+	[CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+	[CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+	[CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+	[VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+	[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+	[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+	[MDP_CLK_SRC] = &mdp_clk_src.clkr,
+	[PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+	[PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+	[OCMEMNOC_CLK_SRC] = &ocmemnoc_clk_src.clkr,
+	[GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+	[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+	[JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
+	[JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
+	[EDPPIXEL_CLK_SRC] = &edppixel_clk_src.clkr,
+	[EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+	[VP_CLK_SRC] = &vp_clk_src.clkr,
+	[CCI_CLK_SRC] = &cci_clk_src.clkr,
+	[CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+	[CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+	[MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+	[MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+	[MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+	[MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+	[CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+	[CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+	[CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+	[CPP_CLK_SRC] = &cpp_clk_src.clkr,
+	[BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+	[BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+	[EDPAUX_CLK_SRC] = &edpaux_clk_src.clkr,
+	[EDPLINK_CLK_SRC] = &edplink_clk_src.clkr,
+	[ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+	[ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+	[HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+	[VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+	[RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+	[RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+	[MAPLE_CLK_SRC] = &maple_clk_src.clkr,
+	[VDP_CLK_SRC] = &vdp_clk_src.clkr,
+	[VPU_BUS_CLK_SRC] = &vpu_bus_clk_src.clkr,
+	[MMSS_CXO_CLK] = &mmss_cxo_clk.clkr,
+	[MMSS_SLEEPCLK_CLK] = &mmss_sleepclk_clk.clkr,
+	[AVSYNC_AHB_CLK] = &avsync_ahb_clk.clkr,
+	[AVSYNC_EDPPIXEL_CLK] = &avsync_edppixel_clk.clkr,
+	[AVSYNC_EXTPCLK_CLK] = &avsync_extpclk_clk.clkr,
+	[AVSYNC_PCLK0_CLK] = &avsync_pclk0_clk.clkr,
+	[AVSYNC_PCLK1_CLK] = &avsync_pclk1_clk.clkr,
+	[AVSYNC_VP_CLK] = &avsync_vp_clk.clkr,
+	[CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+	[CAMSS_CCI_CCI_AHB_CLK] = &camss_cci_cci_ahb_clk.clkr,
+	[CAMSS_CCI_CCI_CLK] = &camss_cci_cci_clk.clkr,
+	[CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+	[CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+	[CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
+	[CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+	[CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+	[CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+	[CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+	[CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
+	[CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+	[CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+	[CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+	[CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+	[CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
+	[CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+	[CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+	[CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+	[CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+	[CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
+	[CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+	[CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+	[CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+	[CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+	[CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+	[CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+	[CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+	[CAMSS_JPEG_JPEG0_CLK] = &camss_jpeg_jpeg0_clk.clkr,
+	[CAMSS_JPEG_JPEG1_CLK] = &camss_jpeg_jpeg1_clk.clkr,
+	[CAMSS_JPEG_JPEG2_CLK] = &camss_jpeg_jpeg2_clk.clkr,
+	[CAMSS_JPEG_JPEG_AHB_CLK] = &camss_jpeg_jpeg_ahb_clk.clkr,
+	[CAMSS_JPEG_JPEG_AXI_CLK] = &camss_jpeg_jpeg_axi_clk.clkr,
+	[CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+	[CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+	[CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+	[CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+	[CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+	[CAMSS_PHY0_CSI0PHYTIMER_CLK] = &camss_phy0_csi0phytimer_clk.clkr,
+	[CAMSS_PHY1_CSI1PHYTIMER_CLK] = &camss_phy1_csi1phytimer_clk.clkr,
+	[CAMSS_PHY2_CSI2PHYTIMER_CLK] = &camss_phy2_csi2phytimer_clk.clkr,
+	[CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+	[CAMSS_VFE_CPP_AHB_CLK] = &camss_vfe_cpp_ahb_clk.clkr,
+	[CAMSS_VFE_CPP_CLK] = &camss_vfe_cpp_clk.clkr,
+	[CAMSS_VFE_VFE0_CLK] = &camss_vfe_vfe0_clk.clkr,
+	[CAMSS_VFE_VFE1_CLK] = &camss_vfe_vfe1_clk.clkr,
+	[CAMSS_VFE_VFE_AHB_CLK] = &camss_vfe_vfe_ahb_clk.clkr,
+	[CAMSS_VFE_VFE_AXI_CLK] = &camss_vfe_vfe_axi_clk.clkr,
+	[MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+	[MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+	[MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+	[MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+	[MDSS_EDPAUX_CLK] = &mdss_edpaux_clk.clkr,
+	[MDSS_EDPLINK_CLK] = &mdss_edplink_clk.clkr,
+	[MDSS_EDPPIXEL_CLK] = &mdss_edppixel_clk.clkr,
+	[MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+	[MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+	[MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+	[MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
+	[MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+	[MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+	[MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+	[MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+	[MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+	[MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+	[MMSS_RBCPR_AHB_CLK] = &mmss_rbcpr_ahb_clk.clkr,
+	[MMSS_RBCPR_CLK] = &mmss_rbcpr_clk.clkr,
+	[MMSS_SPDM_AHB_CLK] = &mmss_spdm_ahb_clk.clkr,
+	[MMSS_SPDM_AXI_CLK] = &mmss_spdm_axi_clk.clkr,
+	[MMSS_SPDM_CSI0_CLK] = &mmss_spdm_csi0_clk.clkr,
+	[MMSS_SPDM_GFX3D_CLK] = &mmss_spdm_gfx3d_clk.clkr,
+	[MMSS_SPDM_JPEG0_CLK] = &mmss_spdm_jpeg0_clk.clkr,
+	[MMSS_SPDM_JPEG1_CLK] = &mmss_spdm_jpeg1_clk.clkr,
+	[MMSS_SPDM_JPEG2_CLK] = &mmss_spdm_jpeg2_clk.clkr,
+	[MMSS_SPDM_MDP_CLK] = &mmss_spdm_mdp_clk.clkr,
+	[MMSS_SPDM_PCLK0_CLK] = &mmss_spdm_pclk0_clk.clkr,
+	[MMSS_SPDM_PCLK1_CLK] = &mmss_spdm_pclk1_clk.clkr,
+	[MMSS_SPDM_VCODEC0_CLK] = &mmss_spdm_vcodec0_clk.clkr,
+	[MMSS_SPDM_VFE0_CLK] = &mmss_spdm_vfe0_clk.clkr,
+	[MMSS_SPDM_VFE1_CLK] = &mmss_spdm_vfe1_clk.clkr,
+	[MMSS_SPDM_RM_AXI_CLK] = &mmss_spdm_rm_axi_clk.clkr,
+	[MMSS_SPDM_RM_OCMEMNOC_CLK] = &mmss_spdm_rm_ocmemnoc_clk.clkr,
+	[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
+	[MMSS_MMSSNOC_AHB_CLK] = &mmss_mmssnoc_ahb_clk.clkr,
+	[MMSS_MMSSNOC_BTO_AHB_CLK] = &mmss_mmssnoc_bto_ahb_clk.clkr,
+	[MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+	[MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+	[OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+	[OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
+	[OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+	[OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+	[OXILI_RBBMTIMER_CLK] = &oxili_rbbmtimer_clk.clkr,
+	[OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+	[VENUS0_AHB_CLK] = &venus0_ahb_clk.clkr,
+	[VENUS0_AXI_CLK] = &venus0_axi_clk.clkr,
+	[VENUS0_CORE0_VCODEC_CLK] = &venus0_core0_vcodec_clk.clkr,
+	[VENUS0_CORE1_VCODEC_CLK] = &venus0_core1_vcodec_clk.clkr,
+	[VENUS0_OCMEMNOC_CLK] = &venus0_ocmemnoc_clk.clkr,
+	[VENUS0_VCODEC0_CLK] = &venus0_vcodec0_clk.clkr,
+	[VPU_AHB_CLK] = &vpu_ahb_clk.clkr,
+	[VPU_AXI_CLK] = &vpu_axi_clk.clkr,
+	[VPU_BUS_CLK] = &vpu_bus_clk.clkr,
+	[VPU_CXO_CLK] = &vpu_cxo_clk.clkr,
+	[VPU_MAPLE_CLK] = &vpu_maple_clk.clkr,
+	[VPU_SLEEP_CLK] = &vpu_sleep_clk.clkr,
+	[VPU_VDP_CLK] = &vpu_vdp_clk.clkr,
+};
+
+static const struct qcom_reset_map mmcc_apq8084_resets[] = {
+	[MMSS_SPDM_RESET] = { 0x0200 },
+	[MMSS_SPDM_RM_RESET] = { 0x0300 },
+	[VENUS0_RESET] = { 0x1020 },
+	[VPU_RESET] = { 0x1400 },
+	[MDSS_RESET] = { 0x2300 },
+	[AVSYNC_RESET] = { 0x2400 },
+	[CAMSS_PHY0_RESET] = { 0x3020 },
+	[CAMSS_PHY1_RESET] = { 0x3050 },
+	[CAMSS_PHY2_RESET] = { 0x3080 },
+	[CAMSS_CSI0_RESET] = { 0x30b0 },
+	[CAMSS_CSI0PHY_RESET] = { 0x30c0 },
+	[CAMSS_CSI0RDI_RESET] = { 0x30d0 },
+	[CAMSS_CSI0PIX_RESET] = { 0x30e0 },
+	[CAMSS_CSI1_RESET] = { 0x3120 },
+	[CAMSS_CSI1PHY_RESET] = { 0x3130 },
+	[CAMSS_CSI1RDI_RESET] = { 0x3140 },
+	[CAMSS_CSI1PIX_RESET] = { 0x3150 },
+	[CAMSS_CSI2_RESET] = { 0x3180 },
+	[CAMSS_CSI2PHY_RESET] = { 0x3190 },
+	[CAMSS_CSI2RDI_RESET] = { 0x31a0 },
+	[CAMSS_CSI2PIX_RESET] = { 0x31b0 },
+	[CAMSS_CSI3_RESET] = { 0x31e0 },
+	[CAMSS_CSI3PHY_RESET] = { 0x31f0 },
+	[CAMSS_CSI3RDI_RESET] = { 0x3200 },
+	[CAMSS_CSI3PIX_RESET] = { 0x3210 },
+	[CAMSS_ISPIF_RESET] = { 0x3220 },
+	[CAMSS_CCI_RESET] = { 0x3340 },
+	[CAMSS_MCLK0_RESET] = { 0x3380 },
+	[CAMSS_MCLK1_RESET] = { 0x33b0 },
+	[CAMSS_MCLK2_RESET] = { 0x33e0 },
+	[CAMSS_MCLK3_RESET] = { 0x3410 },
+	[CAMSS_GP0_RESET] = { 0x3440 },
+	[CAMSS_GP1_RESET] = { 0x3470 },
+	[CAMSS_TOP_RESET] = { 0x3480 },
+	[CAMSS_AHB_RESET] = { 0x3488 },
+	[CAMSS_MICRO_RESET] = { 0x3490 },
+	[CAMSS_JPEG_RESET] = { 0x35a0 },
+	[CAMSS_VFE_RESET] = { 0x36a0 },
+	[CAMSS_CSI_VFE0_RESET] = { 0x3700 },
+	[CAMSS_CSI_VFE1_RESET] = { 0x3710 },
+	[OXILI_RESET] = { 0x4020 },
+	[OXILICX_RESET] = { 0x4030 },
+	[OCMEMCX_RESET] = { 0x4050 },
+	[MMSS_RBCRP_RESET] = { 0x4080 },
+	[MMSSNOCAHB_RESET] = { 0x5020 },
+	[MMSSNOCAXI_RESET] = { 0x5060 },
+};
+
+static const struct regmap_config mmcc_apq8084_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x5104,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc mmcc_apq8084_desc = {
+	.config = &mmcc_apq8084_regmap_config,
+	.clks = mmcc_apq8084_clocks,
+	.num_clks = ARRAY_SIZE(mmcc_apq8084_clocks),
+	.resets = mmcc_apq8084_resets,
+	.num_resets = ARRAY_SIZE(mmcc_apq8084_resets),
+};
+
+static const struct of_device_id mmcc_apq8084_match_table[] = {
+	{ .compatible = "qcom,mmcc-apq8084" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mmcc_apq8084_match_table);
+
+static int mmcc_apq8084_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct regmap *regmap;
+
+	ret = qcom_cc_probe(pdev, &mmcc_apq8084_desc);
+	if (ret)
+		return ret;
+
+	regmap = dev_get_regmap(&pdev->dev, NULL);
+	clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
+	clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
+
+	return 0;
+}
+
+static int mmcc_apq8084_remove(struct platform_device *pdev)
+{
+	qcom_cc_remove(pdev);
+	return 0;
+}
+
+static struct platform_driver mmcc_apq8084_driver = {
+	.probe		= mmcc_apq8084_probe,
+	.remove		= mmcc_apq8084_remove,
+	.driver		= {
+		.name	= "mmcc-apq8084",
+		.owner	= THIS_MODULE,
+		.of_match_table = mmcc_apq8084_match_table,
+	},
+};
+module_platform_driver(mmcc_apq8084_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC APQ8084 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-apq8084");
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 4c449b3..2e80a21 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -37,6 +37,9 @@
 #define P_PLL8	1
 #define P_PLL2	2
 #define P_PLL3	3
+#define P_PLL15	3
+
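+/* shorthand for struct freq_tbl entries that program the M/N counter */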
+#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
 
 static u8 mmcc_pxo_pll8_pll2_map[] = {
 	[P_PXO]		= 0,
@@ -57,10 +60,24 @@
 	[P_PLL3]	= 3,
 };
 
+static const char *mmcc_pxo_pll8_pll2_pll15[] = {
+	"pxo",
+	"pll8_vote",
+	"pll2",
+	"pll15",
+};
+
+static u8 mmcc_pxo_pll8_pll2_pll15_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL8]	= 2,
+	[P_PLL2]	= 1,
+	[P_PLL15]	= 3,
+};
+
 static const char *mmcc_pxo_pll8_pll2_pll3[] = {
 	"pxo",
-	"pll2",
 	"pll8_vote",
+	"pll2",
 	"pll3",
 };
 
@@ -80,6 +97,36 @@
 	},
 };
 
+static struct clk_pll pll15 = {
+	.l_reg = 0x33c,
+	.m_reg = 0x340,
+	.n_reg = 0x344,
+	.config_reg = 0x348,
+	.mode_reg = 0x338,
+	.status_reg = 0x350,
+	.status_bit = 16,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll15",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
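+/*
+ * Assuming the standard SR PLL formula rate = parent * (L + M/N) and a
+ * 27 MHz pxo, this programs pll15 to 27 MHz * (33 + 1/3) = 900 MHz,
+ * which the gfx3d_8064 table below divides to its 450 MHz entry.
+ */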
+static const struct pll_config pll15_config = {
+	.l = 33,
+	.m = 1,
+	.n = 3,
+	.vco_val = 0x2 << 16,
+	.vco_mask = 0x3 << 16,
+	.pre_div_val = 0x0,
+	.pre_div_mask = BIT(19),
+	.post_div_val = 0x0,
+	.post_div_mask = 0x3 << 20,
+	.mn_ena_mask = BIT(22),
+	.main_output_mask = BIT(23),
+};
+
 static struct freq_tbl clk_tbl_cam[] = {
 	{   6000000, P_PLL8, 4, 1, 16 },
 	{   8000000, P_PLL8, 4, 1, 12 },
@@ -710,18 +757,18 @@
 };
 
 static struct freq_tbl clk_tbl_gfx2d[] = {
-	{  27000000, P_PXO,  1,  0 },
-	{  48000000, P_PLL8, 1,  8 },
-	{  54857000, P_PLL8, 1,  7 },
-	{  64000000, P_PLL8, 1,  6 },
-	{  76800000, P_PLL8, 1,  5 },
-	{  96000000, P_PLL8, 1,  4 },
-	{ 128000000, P_PLL8, 1,  3 },
-	{ 145455000, P_PLL2, 2, 11 },
-	{ 160000000, P_PLL2, 1,  5 },
-	{ 177778000, P_PLL2, 2,  9 },
-	{ 200000000, P_PLL2, 1,  4 },
-	{ 228571000, P_PLL2, 2,  7 },
+	F_MN( 27000000, P_PXO,  1,  0),
+	F_MN( 48000000, P_PLL8, 1,  8),
+	F_MN( 54857000, P_PLL8, 1,  7),
+	F_MN( 64000000, P_PLL8, 1,  6),
+	F_MN( 76800000, P_PLL8, 1,  5),
+	F_MN( 96000000, P_PLL8, 1,  4),
+	F_MN(128000000, P_PLL8, 1,  3),
+	F_MN(145455000, P_PLL2, 2, 11),
+	F_MN(160000000, P_PLL2, 1,  5),
+	F_MN(177778000, P_PLL2, 2,  9),
+	F_MN(200000000, P_PLL2, 1,  4),
+	F_MN(228571000, P_PLL2, 2,  7),
 	{ }
 };
 
@@ -842,22 +889,43 @@
 };
 
 static struct freq_tbl clk_tbl_gfx3d[] = {
-	{  27000000, P_PXO,  1,  0 },
-	{  48000000, P_PLL8, 1,  8 },
-	{  54857000, P_PLL8, 1,  7 },
-	{  64000000, P_PLL8, 1,  6 },
-	{  76800000, P_PLL8, 1,  5 },
-	{  96000000, P_PLL8, 1,  4 },
-	{ 128000000, P_PLL8, 1,  3 },
-	{ 145455000, P_PLL2, 2, 11 },
-	{ 160000000, P_PLL2, 1,  5 },
-	{ 177778000, P_PLL2, 2,  9 },
-	{ 200000000, P_PLL2, 1,  4 },
-	{ 228571000, P_PLL2, 2,  7 },
-	{ 266667000, P_PLL2, 1,  3 },
-	{ 300000000, P_PLL3, 1,  4 },
-	{ 320000000, P_PLL2, 2,  5 },
-	{ 400000000, P_PLL2, 1,  2 },
+	F_MN( 27000000, P_PXO,  1,  0),
+	F_MN( 48000000, P_PLL8, 1,  8),
+	F_MN( 54857000, P_PLL8, 1,  7),
+	F_MN( 64000000, P_PLL8, 1,  6),
+	F_MN( 76800000, P_PLL8, 1,  5),
+	F_MN( 96000000, P_PLL8, 1,  4),
+	F_MN(128000000, P_PLL8, 1,  3),
+	F_MN(145455000, P_PLL2, 2, 11),
+	F_MN(160000000, P_PLL2, 1,  5),
+	F_MN(177778000, P_PLL2, 2,  9),
+	F_MN(200000000, P_PLL2, 1,  4),
+	F_MN(228571000, P_PLL2, 2,  7),
+	F_MN(266667000, P_PLL2, 1,  3),
+	F_MN(300000000, P_PLL3, 1,  4),
+	F_MN(320000000, P_PLL2, 2,  5),
+	F_MN(400000000, P_PLL2, 1,  2),
+	{ }
+};
+
+static struct freq_tbl clk_tbl_gfx3d_8064[] = {
+	F_MN( 27000000, P_PXO,   0,  0),
+	F_MN( 48000000, P_PLL8,  1,  8),
+	F_MN( 54857000, P_PLL8,  1,  7),
+	F_MN( 64000000, P_PLL8,  1,  6),
+	F_MN( 76800000, P_PLL8,  1,  5),
+	F_MN( 96000000, P_PLL8,  1,  4),
+	F_MN(128000000, P_PLL8,  1,  3),
+	F_MN(145455000, P_PLL2,  2, 11),
+	F_MN(160000000, P_PLL2,  1,  5),
+	F_MN(177778000, P_PLL2,  2,  9),
+	F_MN(192000000, P_PLL8,  1,  2),
+	F_MN(200000000, P_PLL2,  1,  4),
+	F_MN(228571000, P_PLL2,  2,  7),
+	F_MN(266667000, P_PLL2,  1,  3),
+	F_MN(320000000, P_PLL2,  2,  5),
+	F_MN(400000000, P_PLL2,  1,  2),
+	F_MN(450000000, P_PLL15, 1,  2),
 	{ }
 };
 
@@ -897,12 +965,19 @@
 		.hw.init = &(struct clk_init_data){
 			.name = "gfx3d_src",
 			.parent_names = mmcc_pxo_pll8_pll2_pll3,
-			.num_parents = 3,
+			.num_parents = 4,
 			.ops = &clk_dyn_rcg_ops,
 		},
 	},
 };
 
+static const struct clk_init_data gfx3d_8064_init = {
+	.name = "gfx3d_src",
+	.parent_names = mmcc_pxo_pll8_pll2_pll15,
+	.num_parents = 4,
+	.ops = &clk_dyn_rcg_ops,
+};
+
 static struct clk_branch gfx3d_clk = {
 	.halt_reg = 0x01c8,
 	.halt_bit = 4,
@@ -919,6 +994,91 @@
 	},
 };
 
+static struct freq_tbl clk_tbl_vcap[] = {
+	F_MN( 27000000, P_PXO,  0,  0),
+	F_MN( 54860000, P_PLL8, 1,  7),
+	F_MN( 64000000, P_PLL8, 1,  6),
+	F_MN( 76800000, P_PLL8, 1,  5),
+	F_MN(128000000, P_PLL8, 1,  3),
+	F_MN(160000000, P_PLL2, 1,  5),
+	F_MN(200000000, P_PLL2, 1,  4),
+	{ }
+};
+
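+/*
+ * A dynamic RCG keeps two banked M/N counter and source-select sets;
+ * mux_sel_bit flips between the banks so the rate can change without
+ * glitching the downstream clock.
+ */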
+static struct clk_dyn_rcg vcap_src = {
+	.ns_reg = 0x021c,
+	.md_reg[0] = 0x01ec,
+	.md_reg[1] = 0x0218,
+	.mn[0] = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 23,
+		.mnctr_mode_shift = 9,
+		.n_val_shift = 18,
+		.m_val_shift = 4,
+		.width = 4,
+	},
+	.mn[1] = {
+		.mnctr_en_bit = 5,
+		.mnctr_reset_bit = 22,
+		.mnctr_mode_shift = 6,
+		.n_val_shift = 14,
+		.m_val_shift = 4,
+		.width = 4,
+	},
+	.s[0] = {
+		.src_sel_shift = 3,
+		.parent_map = mmcc_pxo_pll8_pll2_map,
+	},
+	.s[1] = {
+		.src_sel_shift = 0,
+		.parent_map = mmcc_pxo_pll8_pll2_map,
+	},
+	.mux_sel_bit = 11,
+	.freq_tbl = clk_tbl_vcap,
+	.clkr = {
+		.enable_reg = 0x0178,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "vcap_src",
+			.parent_names = mmcc_pxo_pll8_pll2,
+			.num_parents = 3,
+			.ops = &clk_dyn_rcg_ops,
+		},
+	},
+};
+
+static struct clk_branch vcap_clk = {
+	.halt_reg = 0x0240,
+	.halt_bit = 15,
+	.clkr = {
+		.enable_reg = 0x0178,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vcap_clk",
+			.parent_names = (const char *[]){ "vcap_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch vcap_npl_clk = {
+	.halt_reg = 0x0240,
+	.halt_bit = 25,
+	.clkr = {
+		.enable_reg = 0x0178,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "vcap_npl_clk",
+			.parent_names = (const char *[]){ "vcap_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
 static struct freq_tbl clk_tbl_ijpeg[] = {
 	{  27000000, P_PXO,  1, 0,  0 },
 	{  36570000, P_PLL8, 1, 2, 21 },
@@ -995,7 +1155,7 @@
 	.ns_reg = 0x00ac,
 	.p = {
 		.pre_div_shift = 12,
-		.pre_div_width = 2,
+		.pre_div_width = 4,
 	},
 	.s = {
 		.src_sel_shift = 0,
@@ -1115,7 +1275,7 @@
 		.enable_reg = 0x016c,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
-			.parent_names = (const char *[]){ "mdp_clk" },
+			.parent_names = (const char *[]){ "mdp_src" },
 			.num_parents = 1,
 			.name = "mdp_lut_clk",
 			.ops = &clk_branch_ops,
@@ -1218,12 +1378,7 @@
 };
 
 static struct freq_tbl clk_tbl_tv[] = {
-	{  25200000, P_HDMI_PLL, 1, 0, 0 },
-	{  27000000, P_HDMI_PLL, 1, 0, 0 },
-	{  27030000, P_HDMI_PLL, 1, 0, 0 },
-	{  74250000, P_HDMI_PLL, 1, 0, 0 },
-	{ 108000000, P_HDMI_PLL, 1, 0, 0 },
-	{ 148500000, P_HDMI_PLL, 1, 0, 0 },
+	{  .src = P_HDMI_PLL, .pre_div = 1 },
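+	/* the HDMI PLL supplies the actual rate; tv_src passes it through */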
 	{ }
 };
 
@@ -1254,7 +1409,7 @@
 			.name = "tv_src",
 			.parent_names = mmcc_pxo_hdmi,
 			.num_parents = 2,
-			.ops = &clk_rcg_ops,
+			.ops = &clk_rcg_bypass_ops,
 			.flags = CLK_SET_RATE_PARENT,
 		},
 	},
@@ -1326,6 +1481,38 @@
 	},
 };
 
+static struct clk_branch rgb_tv_clk = {
+	.halt_reg = 0x0240,
+	.halt_bit = 27,
+	.clkr = {
+		.enable_reg = 0x0124,
+		.enable_mask = BIT(14),
+		.hw.init = &(struct clk_init_data){
+			.parent_names = tv_src_name,
+			.num_parents = 1,
+			.name = "rgb_tv_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch npl_tv_clk = {
+	.halt_reg = 0x0240,
+	.halt_bit = 26,
+	.clkr = {
+		.enable_reg = 0x0124,
+		.enable_mask = BIT(16),
+		.hw.init = &(struct clk_init_data){
+			.parent_names = tv_src_name,
+			.num_parents = 1,
+			.name = "npl_tv_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
 static struct clk_branch hdmi_app_clk = {
 	.halt_reg = 0x01cc,
 	.halt_bit = 25,
@@ -1342,15 +1529,15 @@
 };
 
 static struct freq_tbl clk_tbl_vcodec[] = {
-	{  27000000, P_PXO,  1,  0 },
-	{  32000000, P_PLL8, 1, 12 },
-	{  48000000, P_PLL8, 1,  8 },
-	{  54860000, P_PLL8, 1,  7 },
-	{  96000000, P_PLL8, 1,  4 },
-	{ 133330000, P_PLL2, 1,  6 },
-	{ 200000000, P_PLL2, 1,  4 },
-	{ 228570000, P_PLL2, 2,  7 },
-	{ 266670000, P_PLL2, 1,  3 },
+	F_MN( 27000000, P_PXO,  1,  0),
+	F_MN( 32000000, P_PLL8, 1, 12),
+	F_MN( 48000000, P_PLL8, 1,  8),
+	F_MN( 54860000, P_PLL8, 1,  7),
+	F_MN( 96000000, P_PLL8, 1,  4),
+	F_MN(133330000, P_PLL2, 1,  6),
+	F_MN(200000000, P_PLL2, 1,  4),
+	F_MN(228570000, P_PLL2, 2,  7),
+	F_MN(266670000, P_PLL2, 1,  3),
 	{ }
 };
 
@@ -1701,6 +1888,22 @@
 	},
 };
 
+static struct clk_branch vcap_axi_clk = {
+	.halt_reg = 0x0240,
+	.halt_bit = 20,
+	.hwcg_reg = 0x0244,
+	.hwcg_bit = 11,
+	.clkr = {
+		.enable_reg = 0x0244,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "vcap_axi_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
 static struct clk_branch vpe_axi_clk = {
 	.hwcg_reg = 0x0020,
 	.hwcg_bit = 27,
@@ -2003,6 +2206,20 @@
 	},
 };
 
+static struct clk_branch vcap_ahb_clk = {
+	.halt_reg = 0x0240,
+	.halt_bit = 23,
+	.clkr = {
+		.enable_reg = 0x0248,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "vcap_ahb_clk",
+			.ops = &clk_branch_ops,
+			.flags = CLK_IS_ROOT,
+		},
+	},
+};
+
 static struct clk_branch vcodec_ahb_clk = {
 	.hwcg_reg = 0x0038,
 	.hwcg_bit = 26,
@@ -2215,6 +2432,175 @@
 	[CSI_RDI2_RESET] = { 0x0214 },
 };
 
+static struct clk_regmap *mmcc_apq8064_clks[] = {
+	[AMP_AHB_CLK] = &amp_ahb_clk.clkr,
+	[DSI2_S_AHB_CLK] = &dsi2_s_ahb_clk.clkr,
+	[JPEGD_AHB_CLK] = &jpegd_ahb_clk.clkr,
+	[DSI_S_AHB_CLK] = &dsi_s_ahb_clk.clkr,
+	[DSI2_M_AHB_CLK] = &dsi2_m_ahb_clk.clkr,
+	[VPE_AHB_CLK] = &vpe_ahb_clk.clkr,
+	[SMMU_AHB_CLK] = &smmu_ahb_clk.clkr,
+	[HDMI_M_AHB_CLK] = &hdmi_m_ahb_clk.clkr,
+	[VFE_AHB_CLK] = &vfe_ahb_clk.clkr,
+	[ROT_AHB_CLK] = &rot_ahb_clk.clkr,
+	[VCODEC_AHB_CLK] = &vcodec_ahb_clk.clkr,
+	[MDP_AHB_CLK] = &mdp_ahb_clk.clkr,
+	[DSI_M_AHB_CLK] = &dsi_m_ahb_clk.clkr,
+	[CSI_AHB_CLK] = &csi_ahb_clk.clkr,
+	[MMSS_IMEM_AHB_CLK] = &mmss_imem_ahb_clk.clkr,
+	[IJPEG_AHB_CLK] = &ijpeg_ahb_clk.clkr,
+	[HDMI_S_AHB_CLK] = &hdmi_s_ahb_clk.clkr,
+	[GFX3D_AHB_CLK] = &gfx3d_ahb_clk.clkr,
+	[JPEGD_AXI_CLK] = &jpegd_axi_clk.clkr,
+	[GMEM_AXI_CLK] = &gmem_axi_clk.clkr,
+	[MDP_AXI_CLK] = &mdp_axi_clk.clkr,
+	[MMSS_IMEM_AXI_CLK] = &mmss_imem_axi_clk.clkr,
+	[IJPEG_AXI_CLK] = &ijpeg_axi_clk.clkr,
+	[GFX3D_AXI_CLK] = &gfx3d_axi_clk.clkr,
+	[VCODEC_AXI_CLK] = &vcodec_axi_clk.clkr,
+	[VFE_AXI_CLK] = &vfe_axi_clk.clkr,
+	[VPE_AXI_CLK] = &vpe_axi_clk.clkr,
+	[ROT_AXI_CLK] = &rot_axi_clk.clkr,
+	[VCODEC_AXI_A_CLK] = &vcodec_axi_a_clk.clkr,
+	[VCODEC_AXI_B_CLK] = &vcodec_axi_b_clk.clkr,
+	[CSI0_SRC] = &csi0_src.clkr,
+	[CSI0_CLK] = &csi0_clk.clkr,
+	[CSI0_PHY_CLK] = &csi0_phy_clk.clkr,
+	[CSI1_SRC] = &csi1_src.clkr,
+	[CSI1_CLK] = &csi1_clk.clkr,
+	[CSI1_PHY_CLK] = &csi1_phy_clk.clkr,
+	[CSI2_SRC] = &csi2_src.clkr,
+	[CSI2_CLK] = &csi2_clk.clkr,
+	[CSI2_PHY_CLK] = &csi2_phy_clk.clkr,
+	[CSI_PIX_CLK] = &csi_pix_clk.clkr,
+	[CSI_RDI_CLK] = &csi_rdi_clk.clkr,
+	[MDP_VSYNC_CLK] = &mdp_vsync_clk.clkr,
+	[HDMI_APP_CLK] = &hdmi_app_clk.clkr,
+	[CSI_PIX1_CLK] = &csi_pix1_clk.clkr,
+	[CSI_RDI2_CLK] = &csi_rdi2_clk.clkr,
+	[CSI_RDI1_CLK] = &csi_rdi1_clk.clkr,
+	[GFX3D_SRC] = &gfx3d_src.clkr,
+	[GFX3D_CLK] = &gfx3d_clk.clkr,
+	[IJPEG_SRC] = &ijpeg_src.clkr,
+	[IJPEG_CLK] = &ijpeg_clk.clkr,
+	[JPEGD_SRC] = &jpegd_src.clkr,
+	[JPEGD_CLK] = &jpegd_clk.clkr,
+	[MDP_SRC] = &mdp_src.clkr,
+	[MDP_CLK] = &mdp_clk.clkr,
+	[MDP_LUT_CLK] = &mdp_lut_clk.clkr,
+	[ROT_SRC] = &rot_src.clkr,
+	[ROT_CLK] = &rot_clk.clkr,
+	[TV_DAC_CLK] = &tv_dac_clk.clkr,
+	[HDMI_TV_CLK] = &hdmi_tv_clk.clkr,
+	[MDP_TV_CLK] = &mdp_tv_clk.clkr,
+	[TV_SRC] = &tv_src.clkr,
+	[VCODEC_SRC] = &vcodec_src.clkr,
+	[VCODEC_CLK] = &vcodec_clk.clkr,
+	[VFE_SRC] = &vfe_src.clkr,
+	[VFE_CLK] = &vfe_clk.clkr,
+	[VFE_CSI_CLK] = &vfe_csi_clk.clkr,
+	[VPE_SRC] = &vpe_src.clkr,
+	[VPE_CLK] = &vpe_clk.clkr,
+	[CAMCLK0_SRC] = &camclk0_src.clkr,
+	[CAMCLK0_CLK] = &camclk0_clk.clkr,
+	[CAMCLK1_SRC] = &camclk1_src.clkr,
+	[CAMCLK1_CLK] = &camclk1_clk.clkr,
+	[CAMCLK2_SRC] = &camclk2_src.clkr,
+	[CAMCLK2_CLK] = &camclk2_clk.clkr,
+	[CSIPHYTIMER_SRC] = &csiphytimer_src.clkr,
+	[CSIPHY2_TIMER_CLK] = &csiphy2_timer_clk.clkr,
+	[CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
+	[CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
+	[PLL2] = &pll2.clkr,
+	[RGB_TV_CLK] = &rgb_tv_clk.clkr,
+	[NPL_TV_CLK] = &npl_tv_clk.clkr,
+	[VCAP_AHB_CLK] = &vcap_ahb_clk.clkr,
+	[VCAP_AXI_CLK] = &vcap_axi_clk.clkr,
+	[VCAP_SRC] = &vcap_src.clkr,
+	[VCAP_CLK] = &vcap_clk.clkr,
+	[VCAP_NPL_CLK] = &vcap_npl_clk.clkr,
+	[PLL15] = &pll15.clkr,
+};
+
+static const struct qcom_reset_map mmcc_apq8064_resets[] = {
+	[GFX3D_AXI_RESET] = { 0x0208, 17 },
+	[VCAP_AXI_RESET] = { 0x0208, 16 },
+	[VPE_AXI_RESET] = { 0x0208, 15 },
+	[IJPEG_AXI_RESET] = { 0x0208, 14 },
+	[MPD_AXI_RESET] = { 0x0208, 13 },
+	[VFE_AXI_RESET] = { 0x0208, 9 },
+	[SP_AXI_RESET] = { 0x0208, 8 },
+	[VCODEC_AXI_RESET] = { 0x0208, 7 },
+	[ROT_AXI_RESET] = { 0x0208, 6 },
+	[VCODEC_AXI_A_RESET] = { 0x0208, 5 },
+	[VCODEC_AXI_B_RESET] = { 0x0208, 4 },
+	[FAB_S3_AXI_RESET] = { 0x0208, 3 },
+	[FAB_S2_AXI_RESET] = { 0x0208, 2 },
+	[FAB_S1_AXI_RESET] = { 0x0208, 1 },
+	[FAB_S0_AXI_RESET] = { 0x0208 },
+	[SMMU_GFX3D_ABH_RESET] = { 0x020c, 31 },
+	[SMMU_VPE_AHB_RESET] = { 0x020c, 30 },
+	[SMMU_VFE_AHB_RESET] = { 0x020c, 29 },
+	[SMMU_ROT_AHB_RESET] = { 0x020c, 28 },
+	[SMMU_VCODEC_B_AHB_RESET] = { 0x020c, 27 },
+	[SMMU_VCODEC_A_AHB_RESET] = { 0x020c, 26 },
+	[SMMU_MDP1_AHB_RESET] = { 0x020c, 25 },
+	[SMMU_MDP0_AHB_RESET] = { 0x020c, 24 },
+	[SMMU_JPEGD_AHB_RESET] = { 0x020c, 23 },
+	[SMMU_IJPEG_AHB_RESET] = { 0x020c, 22 },
+	[APU_AHB_RESET] = { 0x020c, 18 },
+	[CSI_AHB_RESET] = { 0x020c, 17 },
+	[TV_ENC_AHB_RESET] = { 0x020c, 15 },
+	[VPE_AHB_RESET] = { 0x020c, 14 },
+	[FABRIC_AHB_RESET] = { 0x020c, 13 },
+	[GFX3D_AHB_RESET] = { 0x020c, 10 },
+	[HDMI_AHB_RESET] = { 0x020c, 9 },
+	[MSSS_IMEM_AHB_RESET] = { 0x020c, 8 },
+	[IJPEG_AHB_RESET] = { 0x020c, 7 },
+	[DSI_M_AHB_RESET] = { 0x020c, 6 },
+	[DSI_S_AHB_RESET] = { 0x020c, 5 },
+	[JPEGD_AHB_RESET] = { 0x020c, 4 },
+	[MDP_AHB_RESET] = { 0x020c, 3 },
+	[ROT_AHB_RESET] = { 0x020c, 2 },
+	[VCODEC_AHB_RESET] = { 0x020c, 1 },
+	[VFE_AHB_RESET] = { 0x020c, 0 },
+	[SMMU_VCAP_AHB_RESET] = { 0x0200, 3 },
+	[VCAP_AHB_RESET] = { 0x0200, 2 },
+	[DSI2_M_AHB_RESET] = { 0x0200, 1 },
+	[DSI2_S_AHB_RESET] = { 0x0200, 0 },
+	[CSIPHY2_RESET] = { 0x0210, 31 },
+	[CSI_PIX1_RESET] = { 0x0210, 30 },
+	[CSIPHY0_RESET] = { 0x0210, 29 },
+	[CSIPHY1_RESET] = { 0x0210, 28 },
+	[CSI_RDI_RESET] = { 0x0210, 27 },
+	[CSI_PIX_RESET] = { 0x0210, 26 },
+	[DSI2_RESET] = { 0x0210, 25 },
+	[VFE_CSI_RESET] = { 0x0210, 24 },
+	[MDP_RESET] = { 0x0210, 21 },
+	[AMP_RESET] = { 0x0210, 20 },
+	[JPEGD_RESET] = { 0x0210, 19 },
+	[CSI1_RESET] = { 0x0210, 18 },
+	[VPE_RESET] = { 0x0210, 17 },
+	[MMSS_FABRIC_RESET] = { 0x0210, 16 },
+	[VFE_RESET] = { 0x0210, 15 },
+	[GFX3D_RESET] = { 0x0210, 12 },
+	[HDMI_RESET] = { 0x0210, 11 },
+	[MMSS_IMEM_RESET] = { 0x0210, 10 },
+	[IJPEG_RESET] = { 0x0210, 9 },
+	[CSI0_RESET] = { 0x0210, 8 },
+	[DSI_RESET] = { 0x0210, 7 },
+	[VCODEC_RESET] = { 0x0210, 6 },
+	[MDP_TV_RESET] = { 0x0210, 4 },
+	[MDP_VSYNC_RESET] = { 0x0210, 3 },
+	[ROT_RESET] = { 0x0210, 2 },
+	[TV_HDMI_RESET] = { 0x0210, 1 },
+	[VCAP_NPL_RESET] = { 0x0214, 4 },
+	[VCAP_RESET] = { 0x0214, 3 },
+	[CSI2_RESET] = { 0x0214, 2 },
+	[CSI_RDI1_RESET] = { 0x0214, 1 },
+	[CSI_RDI2_RESET] = { 0x0214 },
+};
+
 static const struct regmap_config mmcc_msm8960_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
@@ -2223,6 +2609,14 @@
 	.fast_io	= true,
 };
 
+static const struct regmap_config mmcc_apq8064_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x350,
+	.fast_io	= true,
+};
+
 static const struct qcom_cc_desc mmcc_msm8960_desc = {
 	.config = &mmcc_msm8960_regmap_config,
 	.clks = mmcc_msm8960_clks,
@@ -2231,15 +2625,47 @@
 	.num_resets = ARRAY_SIZE(mmcc_msm8960_resets),
 };
 
+static const struct qcom_cc_desc mmcc_apq8064_desc = {
+	.config = &mmcc_apq8064_regmap_config,
+	.clks = mmcc_apq8064_clks,
+	.num_clks = ARRAY_SIZE(mmcc_apq8064_clks),
+	.resets = mmcc_apq8064_resets,
+	.num_resets = ARRAY_SIZE(mmcc_apq8064_resets),
+};
+
 static const struct of_device_id mmcc_msm8960_match_table[] = {
-	{ .compatible = "qcom,mmcc-msm8960" },
+	{ .compatible = "qcom,mmcc-msm8960", .data = &mmcc_msm8960_desc },
+	{ .compatible = "qcom,mmcc-apq8064", .data = &mmcc_apq8064_desc },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, mmcc_msm8960_match_table);
 
 static int mmcc_msm8960_probe(struct platform_device *pdev)
 {
-	return qcom_cc_probe(pdev, &mmcc_msm8960_desc);
+	const struct of_device_id *match;
+	struct regmap *regmap;
+	bool is_8064;
+	struct device *dev = &pdev->dev;
+
+	match = of_match_device(mmcc_msm8960_match_table, dev);
+	if (!match)
+		return -EINVAL;
+
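+	/*
+	 * APQ8064 feeds gfx3d from PLL15, so retarget the shared gfx3d_src
+	 * definition before the clocks are registered.
+	 */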
+	is_8064 = of_device_is_compatible(dev->of_node, "qcom,mmcc-apq8064");
+	if (is_8064) {
+		gfx3d_src.freq_tbl = clk_tbl_gfx3d_8064;
+		gfx3d_src.clkr.hw.init = &gfx3d_8064_init;
+		gfx3d_src.s[0].parent_map = mmcc_pxo_pll8_pll2_pll15_map;
+		gfx3d_src.s[1].parent_map = mmcc_pxo_pll8_pll2_pll15_map;
+	}
+
+	regmap = qcom_cc_map(pdev, match->data);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
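+	/* program pll15 before registering the clocks derived from it */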
+	clk_pll_configure_sr(&pll15, regmap, &pll15_config, false);
+
+	return qcom_cc_really_probe(pdev, match->data, regmap);
 }
 
 static int mmcc_msm8960_remove(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index c65b905..bc8f519 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2547,18 +2547,16 @@
 
 static int mmcc_msm8974_probe(struct platform_device *pdev)
 {
-	int ret;
 	struct regmap *regmap;
 
-	ret = qcom_cc_probe(pdev, &mmcc_msm8974_desc);
-	if (ret)
-		return ret;
+	regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
 
-	regmap = dev_get_regmap(&pdev->dev, NULL);
 	clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
 	clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
 
-	return 0;
+	return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
 }
 
 static int mmcc_msm8974_remove(struct platform_device *pdev)
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 8d3aefa..ee6b077 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -3,3 +3,9 @@
 #
 
 obj-y	+= clk-rockchip.o
+obj-y	+= clk.o
+obj-y	+= clk-pll.o
+obj-$(CONFIG_RESET_CONTROLLER)	+= softrst.o
+
+obj-y	+= clk-rk3188.o
+obj-y	+= clk-rk3288.o
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
new file mode 100644
index 0000000..f2a1c7a
--- /dev/null
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/div64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include "clk.h"
+
+#define PLL_MODE_MASK		0x3
+#define PLL_MODE_SLOW		0x0
+#define PLL_MODE_NORM		0x1
+#define PLL_MODE_DEEP		0x2
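+/* mode mux positions: slow selects xin24m, norm the pll, deep xin32k */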
+
+struct rockchip_clk_pll {
+	struct clk_hw		hw;
+
+	struct clk_mux		pll_mux;
+	const struct clk_ops	*pll_mux_ops;
+
+	struct notifier_block	clk_nb;
+	bool			rate_change_remuxed;
+
+	void __iomem		*reg_base;
+	int			lock_offset;
+	unsigned int		lock_shift;
+	enum rockchip_pll_type	type;
+	const struct rockchip_pll_rate_table *rate_table;
+	unsigned int		rate_count;
+	spinlock_t		*lock;
+};
+
+#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
+#define to_rockchip_clk_pll_nb(nb) \
+			container_of(nb, struct rockchip_clk_pll, clk_nb)
+
+static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
+			    struct rockchip_clk_pll *pll, unsigned long rate)
+{
+	const struct rockchip_pll_rate_table  *rate_table = pll->rate_table;
+	int i;
+
+	for (i = 0; i < pll->rate_count; i++) {
+		if (rate == rate_table[i].rate)
+			return &rate_table[i];
+	}
+
+	return NULL;
+}
+
+static long rockchip_pll_round_rate(struct clk_hw *hw,
+			    unsigned long drate, unsigned long *prate)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
+	int i;
+
+	/* Assuming rate_table is in descending order */
+	for (i = 0; i < pll->rate_count; i++) {
+		if (drate >= rate_table[i].rate)
+			return rate_table[i].rate;
+	}
+
+	/* return minimum supported value */
+	return rate_table[i - 1].rate;
+}
+
+/*
+ * Wait for the pll to reach the locked state.
+ * The calling set_rate function is responsible for making sure the
+ * grf regmap is available.
+ */
+static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+	struct regmap *grf = rockchip_clk_get_grf();
+	unsigned int val;
+	int delay = 24000000, ret;
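+	/* bounded busy-wait; the lock bit is read from the GRF via regmap */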
+
+	while (delay > 0) {
+		ret = regmap_read(grf, pll->lock_offset, &val);
+		if (ret) {
+			pr_err("%s: failed to read pll lock status: %d\n",
+			       __func__, ret);
+			return ret;
+		}
+
+		if (val & BIT(pll->lock_shift))
+			return 0;
+		delay--;
+	}
+
+	pr_err("%s: timeout waiting for pll to lock\n", __func__);
+	return -ETIMEDOUT;
+}
+
+/*
+ * Switch the pll mux when changing the pll rate.
+ * This moves the pll mux away from the actual pll to the slow source
+ * before changing its rate, and back to the original parent after the
+ * change.
+ */
+static int rockchip_pll_notifier_cb(struct notifier_block *nb,
+					unsigned long event, void *data)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll_nb(nb);
+	struct clk_mux *pll_mux = &pll->pll_mux;
+	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+	int cur_parent;
+
+	switch (event) {
+	case PRE_RATE_CHANGE:
+		cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+		if (cur_parent == PLL_MODE_NORM) {
+			pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+			pll->rate_change_remuxed = 1;
+		}
+		break;
+	case POST_RATE_CHANGE:
+		if (pll->rate_change_remuxed) {
+			pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+			pll->rate_change_remuxed = 0;
+		}
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * PLL used in RK3066, RK3188 and RK3288
+ */
+
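+/* udelay in us, roughly nr * 500 cycles of the 24 MHz reference, rounded up */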
+#define RK3066_PLL_RESET_DELAY(nr)	(((nr) * 500) / 24 + 1)
+
+#define RK3066_PLLCON(i)		(i * 0x4)
+#define RK3066_PLLCON0_OD_MASK		0xf
+#define RK3066_PLLCON0_OD_SHIFT		0
+#define RK3066_PLLCON0_NR_MASK		0x3f
+#define RK3066_PLLCON0_NR_SHIFT		8
+#define RK3066_PLLCON1_NF_MASK		0x1fff
+#define RK3066_PLLCON1_NF_SHIFT		0
+#define RK3066_PLLCON2_BWADJ_MASK	0xfff
+#define RK3066_PLLCON2_BWADJ_SHIFT	0
+#define RK3066_PLLCON3_RESET		(1 << 5)
+#define RK3066_PLLCON3_PWRDOWN		(1 << 1)
+#define RK3066_PLLCON3_BYPASS		(1 << 0)
+
+static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
+						     unsigned long prate)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	u64 nf, nr, no, rate64 = prate;
+	u32 pllcon;
+
+	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
+	if (pllcon & RK3066_PLLCON3_BYPASS) {
+		pr_debug("%s: pll %s is bypassed\n", __func__,
+			__clk_get_name(hw->clk));
+		return prate;
+	}
+
+	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+	nf = (pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK;
+
+	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+	nr = (pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK;
+	no = (pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK;
+
+	rate64 *= (nf + 1);
+	do_div(rate64, nr + 1);
+	do_div(rate64, no + 1);
+
+	return (unsigned long)rate64;
+}
+
+static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+					unsigned long prate)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	const struct rockchip_pll_rate_table *rate;
+	unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
+	struct regmap *grf = rockchip_clk_get_grf();
+	int ret;
+
+	if (IS_ERR(grf)) {
+		pr_debug("%s: grf regmap not available, aborting rate change\n",
+			 __func__);
+		return PTR_ERR(grf);
+	}
+
+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
+		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
+
+	/* Get required rate settings from table */
+	rate = rockchip_get_pll_settings(pll, drate);
+	if (!rate) {
+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+			drate, __clk_get_name(hw->clk));
+		return -EINVAL;
+	}
+
+	pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
+		 __func__, rate->rate, rate->nr, rate->no, rate->nf);
+
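+	/*
+	 * These CRU registers use a hiword mask: the upper 16 bits select
+	 * which of the lower 16 bits a write affects, which is what
+	 * HIWORD_UPDATE(val, mask, shift) encodes.
+	 */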
+	/* enter reset mode */
+	writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
+	       pll->reg_base + RK3066_PLLCON(3));
+
+	/* update pll values */
+	writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
+					   RK3066_PLLCON0_NR_SHIFT) |
+	       HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
+					   RK3066_PLLCON0_OD_SHIFT),
+	       pll->reg_base + RK3066_PLLCON(0));
+
+	writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
+						   RK3066_PLLCON1_NF_SHIFT),
+		       pll->reg_base + RK3066_PLLCON(1));
+	writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
+						  RK3066_PLLCON2_BWADJ_SHIFT),
+		       pll->reg_base + RK3066_PLLCON(2));
+
+	/* leave reset and wait the reset_delay */
+	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
+	       pll->reg_base + RK3066_PLLCON(3));
+	udelay(RK3066_PLL_RESET_DELAY(rate->nr));
+
+	/* wait for the pll to lock */
+	ret = rockchip_pll_wait_lock(pll);
+	if (ret) {
+		pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
+			__func__, old_rate);
+		rockchip_rk3066_pll_set_rate(hw, old_rate, prate);
+	}
+
+	return ret;
+}
+
+static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0),
+	       pll->reg_base + RK3066_PLLCON(3));
+
+	return 0;
+}
+
+static void rockchip_rk3066_pll_disable(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+	writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN,
+			     RK3066_PLLCON3_PWRDOWN, 0),
+	       pll->reg_base + RK3066_PLLCON(3));
+}
+
+static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3));
+
+	return !(pllcon & RK3066_PLLCON3_PWRDOWN);
+}
+
+static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
+	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
+	.enable = rockchip_rk3066_pll_enable,
+	.disable = rockchip_rk3066_pll_disable,
+	.is_enabled = rockchip_rk3066_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
+	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
+	.round_rate = rockchip_pll_round_rate,
+	.set_rate = rockchip_rk3066_pll_set_rate,
+	.enable = rockchip_rk3066_pll_enable,
+	.disable = rockchip_rk3066_pll_disable,
+	.is_enabled = rockchip_rk3066_pll_is_enabled,
+};
+
+/*
+ * Common registration of pll clocks
+ */
+
+struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
+		const char *name, const char **parent_names, u8 num_parents,
+		void __iomem *base, int con_offset, int grf_lock_offset,
+		int lock_shift, int mode_offset, int mode_shift,
+		struct rockchip_pll_rate_table *rate_table,
+		spinlock_t *lock)
+{
+	const char *pll_parents[3];
+	struct clk_init_data init;
+	struct rockchip_clk_pll *pll;
+	struct clk_mux *pll_mux;
+	struct clk *pll_clk, *mux_clk;
+	char pll_name[20];
+	int ret;
+
+	if (num_parents != 2) {
+		pr_err("%s: needs two parent clocks\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* name the actual pll */
+	snprintf(pll_name, sizeof(pll_name), "pll_%s", name);
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = pll_name;
+
+	/* keep all plls untouched for now */
+	init.flags = CLK_IGNORE_UNUSED;
+
+	init.parent_names = &parent_names[0];
+	init.num_parents = 1;
+
+	if (rate_table) {
+		int len;
+
+		/* find count of rates in rate_table */
+		for (len = 0; rate_table[len].rate != 0; )
+			len++;
+
+		pll->rate_count = len;
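+		/* copy the table so the caller's (possibly init-memory) copy
+		 * does not need to persist */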
+		pll->rate_table = kmemdup(rate_table,
+					pll->rate_count *
+					sizeof(struct rockchip_pll_rate_table),
+					GFP_KERNEL);
+		WARN(!pll->rate_table,
+			"%s: could not allocate rate table for %s\n",
+			__func__, name);
+	}
+
+	switch (pll_type) {
+	case pll_rk3066:
+		if (!pll->rate_table)
+			init.ops = &rockchip_rk3066_pll_clk_norate_ops;
+		else
+			init.ops = &rockchip_rk3066_pll_clk_ops;
+		break;
+	default:
+		pr_warn("%s: Unknown pll type for pll clk %s\n",
+			__func__, name);
+	}
+
+	pll->hw.init = &init;
+	pll->type = pll_type;
+	pll->reg_base = base + con_offset;
+	pll->lock_offset = grf_lock_offset;
+	pll->lock_shift = lock_shift;
+	pll->lock = lock;
+	pll->clk_nb.notifier_call = rockchip_pll_notifier_cb;
+
+	pll_clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(pll_clk)) {
+		pr_err("%s: failed to register pll clock %s : %ld\n",
+			__func__, name, PTR_ERR(pll_clk));
+		mux_clk = pll_clk;
+		goto err_pll;
+	}
+
+	ret = clk_notifier_register(pll_clk, &pll->clk_nb);
+	if (ret) {
+		pr_err("%s: failed to register clock notifier for %s : %d\n",
+				__func__, name, ret);
+		mux_clk = ERR_PTR(ret);
+		goto err_pll_notifier;
+	}
+
+	/* create the mux on top of the real pll */
+	pll->pll_mux_ops = &clk_mux_ops;
+	pll_mux = &pll->pll_mux;
+
+	/* the actual muxing is xin24m, pll-output, xin32k */
+	pll_parents[0] = parent_names[0];
+	pll_parents[1] = pll_name;
+	pll_parents[2] = parent_names[1];
+
+	init.name = name;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.ops = pll->pll_mux_ops;
+	init.parent_names = pll_parents;
+	init.num_parents = ARRAY_SIZE(pll_parents);
+
+	pll_mux->reg = base + mode_offset;
+	pll_mux->shift = mode_shift;
+	pll_mux->mask = PLL_MODE_MASK;
+	pll_mux->flags = 0;
+	pll_mux->lock = lock;
+	pll_mux->hw.init = &init;
+
+	if (pll_type == pll_rk3066)
+		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
+	mux_clk = clk_register(NULL, &pll_mux->hw);
+	if (IS_ERR(mux_clk))
+		goto err_mux;
+
+	return mux_clk;
+
+err_mux:
+	ret = clk_notifier_unregister(pll_clk, &pll->clk_nb);
+	if (ret) {
+		pr_err("%s: could not unregister clock notifier in error path : %d\n",
+		       __func__, ret);
+		return mux_clk;
+	}
+err_pll_notifier:
+	clk_unregister(pll_clk);
+err_pll:
+	kfree(pll);
+	return mux_clk;
+}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
new file mode 100644
index 0000000..a83a6d8
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/rk3188-cru-common.h>
+#include "clk.h"
+
+#define RK3188_GRF_SOC_STATUS	0xac
+
+enum rk3188_plls {
+	apll, cpll, dpll, gpll,
+};
+
+struct rockchip_pll_rate_table rk3188_pll_rates[] = {
+	RK3066_PLL_RATE(2208000000, 1, 92, 1),
+	RK3066_PLL_RATE(2184000000, 1, 91, 1),
+	RK3066_PLL_RATE(2160000000, 1, 90, 1),
+	RK3066_PLL_RATE(2136000000, 1, 89, 1),
+	RK3066_PLL_RATE(2112000000, 1, 88, 1),
+	RK3066_PLL_RATE(2088000000, 1, 87, 1),
+	RK3066_PLL_RATE(2064000000, 1, 86, 1),
+	RK3066_PLL_RATE(2040000000, 1, 85, 1),
+	RK3066_PLL_RATE(2016000000, 1, 84, 1),
+	RK3066_PLL_RATE(1992000000, 1, 83, 1),
+	RK3066_PLL_RATE(1968000000, 1, 82, 1),
+	RK3066_PLL_RATE(1944000000, 1, 81, 1),
+	RK3066_PLL_RATE(1920000000, 1, 80, 1),
+	RK3066_PLL_RATE(1896000000, 1, 79, 1),
+	RK3066_PLL_RATE(1872000000, 1, 78, 1),
+	RK3066_PLL_RATE(1848000000, 1, 77, 1),
+	RK3066_PLL_RATE(1824000000, 1, 76, 1),
+	RK3066_PLL_RATE(1800000000, 1, 75, 1),
+	RK3066_PLL_RATE(1776000000, 1, 74, 1),
+	RK3066_PLL_RATE(1752000000, 1, 73, 1),
+	RK3066_PLL_RATE(1728000000, 1, 72, 1),
+	RK3066_PLL_RATE(1704000000, 1, 71, 1),
+	RK3066_PLL_RATE(1680000000, 1, 70, 1),
+	RK3066_PLL_RATE(1656000000, 1, 69, 1),
+	RK3066_PLL_RATE(1632000000, 1, 68, 1),
+	RK3066_PLL_RATE(1608000000, 1, 67, 1),
+	RK3066_PLL_RATE(1560000000, 1, 65, 1),
+	RK3066_PLL_RATE(1512000000, 1, 63, 1),
+	RK3066_PLL_RATE(1488000000, 1, 62, 1),
+	RK3066_PLL_RATE(1464000000, 1, 61, 1),
+	RK3066_PLL_RATE(1440000000, 1, 60, 1),
+	RK3066_PLL_RATE(1416000000, 1, 59, 1),
+	RK3066_PLL_RATE(1392000000, 1, 58, 1),
+	RK3066_PLL_RATE(1368000000, 1, 57, 1),
+	RK3066_PLL_RATE(1344000000, 1, 56, 1),
+	RK3066_PLL_RATE(1320000000, 1, 55, 1),
+	RK3066_PLL_RATE(1296000000, 1, 54, 1),
+	RK3066_PLL_RATE(1272000000, 1, 53, 1),
+	RK3066_PLL_RATE(1248000000, 1, 52, 1),
+	RK3066_PLL_RATE(1224000000, 1, 51, 1),
+	RK3066_PLL_RATE(1200000000, 1, 50, 1),
+	RK3066_PLL_RATE(1188000000, 2, 99, 1),
+	RK3066_PLL_RATE(1176000000, 1, 49, 1),
+	RK3066_PLL_RATE(1128000000, 1, 47, 1),
+	RK3066_PLL_RATE(1104000000, 1, 46, 1),
+	RK3066_PLL_RATE(1008000000, 1, 84, 2),
+	RK3066_PLL_RATE( 912000000, 1, 76, 2),
+	RK3066_PLL_RATE( 891000000, 8, 594, 2),
+	RK3066_PLL_RATE( 888000000, 1, 74, 2),
+	RK3066_PLL_RATE( 816000000, 1, 68, 2),
+	RK3066_PLL_RATE( 798000000, 2, 133, 2),
+	RK3066_PLL_RATE( 792000000, 1, 66, 2),
+	RK3066_PLL_RATE( 768000000, 1, 64, 2),
+	RK3066_PLL_RATE( 742500000, 8, 495, 2),
+	RK3066_PLL_RATE( 696000000, 1, 58, 2),
+	RK3066_PLL_RATE( 600000000, 1, 50, 2),
+	RK3066_PLL_RATE( 594000000, 2, 198, 4),
+	RK3066_PLL_RATE( 552000000, 1, 46, 2),
+	RK3066_PLL_RATE( 504000000, 1, 84, 4),
+	RK3066_PLL_RATE( 456000000, 1, 76, 4),
+	RK3066_PLL_RATE( 408000000, 1, 68, 4),
+	RK3066_PLL_RATE( 384000000, 2, 128, 4),
+	RK3066_PLL_RATE( 360000000, 1, 60, 4),
+	RK3066_PLL_RATE( 312000000, 1, 52, 4),
+	RK3066_PLL_RATE( 300000000, 1, 50, 4),
+	RK3066_PLL_RATE( 297000000, 2, 198, 8),
+	RK3066_PLL_RATE( 252000000, 1, 84, 8),
+	RK3066_PLL_RATE( 216000000, 1, 72, 8),
+	RK3066_PLL_RATE( 148500000, 2, 99, 8),
+	RK3066_PLL_RATE( 126000000, 1, 84, 16),
+	RK3066_PLL_RATE(  48000000, 1, 64, 32),
+	{ /* sentinel */ },
+};
+
+PNAME(mux_pll_p)		= { "xin24m", "xin32k" };
+PNAME(mux_armclk_p)		= { "apll", "gpll_armclk" };
+PNAME(mux_ddrphy_p)		= { "dpll", "gpll_ddr" };
+PNAME(mux_pll_src_gpll_cpll_p)	= { "gpll", "cpll" };
+PNAME(mux_pll_src_cpll_gpll_p)	= { "cpll", "gpll" };
+PNAME(mux_aclk_cpu_p)		= { "apll", "gpll" };
+PNAME(mux_sclk_cif0_p)		= { "cif0_pre", "xin24m" };
+PNAME(mux_sclk_i2s0_p)		= { "i2s0_pre", "i2s0_frac", "xin12m" };
+PNAME(mux_sclk_spdif_p)		= { "spdif_src", "spdif_frac", "xin12m" };
+PNAME(mux_sclk_uart0_p)		= { "uart0_pre", "uart0_frac", "xin24m" };
+PNAME(mux_sclk_uart1_p)		= { "uart1_pre", "uart1_frac", "xin24m" };
+PNAME(mux_sclk_uart2_p)		= { "uart2_pre", "uart2_frac", "xin24m" };
+PNAME(mux_sclk_uart3_p)		= { "uart3_pre", "uart3_frac", "xin24m" };
+PNAME(mux_sclk_hsadc_p)		= { "hsadc_src", "hsadc_frac", "ext_hsadc" };
+PNAME(mux_mac_p)		= { "gpll", "dpll" };
+PNAME(mux_sclk_macref_p)	= { "mac_src", "ext_rmii" };
+
+static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
+	[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+		     RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
+	[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+		     RK2928_MODE_CON, 4, 5, NULL),
+	[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+		     RK2928_MODE_CON, 8, 7, rk3188_pll_rates),
+	[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+		     RK2928_MODE_CON, 12, 8, rk3188_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+/* 2 ^ (val + 1) */
+static struct clk_div_table div_core_peri_t[] = {
+	{ .val = 0, .div = 2 },
+	{ .val = 1, .div = 4 },
+	{ .val = 2, .div = 8 },
+	{ .val = 3, .div = 16 },
+	{ /* sentinel */ },
+};
+
+static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+	/*
+	 * Clock-Architecture Diagram 2
+	 */
+
+	GATE(0, "gpll_armclk", "gpll", 0, RK2928_CLKGATE_CON(0), 1, GFLAGS),
+
+	/* these two are set by the cpuclk and should not be changed */
+	COMPOSITE_NOMUX_DIVTBL(CORE_PERI, "core_peri", "armclk", 0,
+			RK2928_CLKSEL_CON(0), 6, 2, DFLAGS | CLK_DIVIDER_READ_ONLY,
+			div_core_peri_t, RK2928_CLKGATE_CON(0), 0, GFLAGS),
+
+	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 9, GFLAGS),
+	GATE(0, "hclk_vepu", "aclk_vepu", 0,
+			RK2928_CLKGATE_CON(3), 10, GFLAGS),
+	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 11, GFLAGS),
+	GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
+			RK2928_CLKGATE_CON(3), 12, GFLAGS),
+
+	GATE(0, "gpll_ddr", "gpll", 0,
+			RK2928_CLKGATE_CON(1), 7, GFLAGS),
+	COMPOSITE(0, "ddrphy", mux_ddrphy_p, 0,
+			RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK2928_CLKGATE_CON(0), 2, GFLAGS),
+
+	GATE(0, "aclk_cpu", "aclk_cpu_pre", 0,
+			RK2928_CLKGATE_CON(0), 3, GFLAGS),
+
+	DIV(0, "pclk_cpu_pre", "aclk_cpu_pre", 0,
+			RK2928_CLKSEL_CON(1), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+	GATE(0, "atclk_cpu", "pclk_cpu_pre", 0,
+			RK2928_CLKGATE_CON(0), 6, GFLAGS),
+	GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
+			RK2928_CLKGATE_CON(0), 5, GFLAGS),
+	DIV(0, "hclk_cpu_pre", "aclk_cpu_pre", 0,
+			RK2928_CLKSEL_CON(1), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+	COMPOSITE_NOMUX(0, "hclk_ahb2apb", "hclk_cpu_pre", 0,
+			RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK2928_CLKGATE_CON(4), 9, GFLAGS),
+	GATE(0, "hclk_cpu", "hclk_cpu_pre", 0,
+			RK2928_CLKGATE_CON(0), 4, GFLAGS),
+
+	COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 0, GFLAGS),
+	COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(31), 15, 1, MFLAGS, 8, 5, DFLAGS,
+			RK2928_CLKGATE_CON(1), 4, GFLAGS),
+
+	GATE(0, "aclk_peri", "aclk_peri_pre", 0,
+			RK2928_CLKGATE_CON(2), 1, GFLAGS),
+	COMPOSITE_NOMUX(0, "hclk_peri", "aclk_peri_pre", 0,
+			RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK2928_CLKGATE_CON(2), 2, GFLAGS),
+	COMPOSITE_NOMUX(0, "pclk_peri", "aclk_peri_pre", 0,
+			RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK2928_CLKGATE_CON(2), 3, GFLAGS),
+
+	MUX(0, "cif_src", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(29), 0, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "cif0_pre", "cif_src", 0,
+			RK2928_CLKSEL_CON(29), 1, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 7, GFLAGS),
+	MUX(SCLK_CIF0, "sclk_cif0", mux_sclk_cif0_p, 0,
+			RK2928_CLKSEL_CON(29), 7, 1, MFLAGS),
+
+	GATE(0, "pclkin_cif0", "ext_cif0", 0,
+			RK2928_CLKGATE_CON(3), 3, GFLAGS),
+
+	/*
+	 * The 480M clocks are generated inside the usb block from these
+	 * clocks, but they are also a source for the hsicphy clock.
+	 */
+	GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+			RK2928_CLKGATE_CON(1), 5, GFLAGS),
+	GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+			RK2928_CLKGATE_CON(1), 6, GFLAGS),
+
+	COMPOSITE(0, "mac_src", mux_mac_p, 0,
+			RK2928_CLKSEL_CON(21), 0, 1, MFLAGS, 8, 5, DFLAGS,
+			RK2928_CLKGATE_CON(2), 5, GFLAGS),
+	MUX(SCLK_MAC, "sclk_macref", mux_sclk_macref_p, CLK_SET_RATE_PARENT,
+			RK2928_CLKSEL_CON(21), 4, 1, MFLAGS),
+	GATE(0, "sclk_mac_lbtest", "sclk_macref", 0,
+			RK2928_CLKGATE_CON(2), 12, GFLAGS),
+
+	COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
+			RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
+			RK2928_CLKGATE_CON(2), 6, GFLAGS),
+	COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
+			RK2928_CLKSEL_CON(23), 0,
+			RK2928_CLKGATE_CON(2), 7, GFLAGS),
+	MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
+			RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
+
+	COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+			RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
+			RK2928_CLKGATE_CON(2), 8, GFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 4
+	 */
+
+	GATE(SCLK_SMC, "sclk_smc", "hclk_peri", 0,
+			RK2928_CLKGATE_CON(2), 4, GFLAGS),
+
+	COMPOSITE_NOMUX(SCLK_SPI0, "sclk_spi0", "pclk_peri", 0,
+			RK2928_CLKSEL_CON(25), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(2), 9, GFLAGS),
+	COMPOSITE_NOMUX(SCLK_SPI1, "sclk_spi1", "pclk_peri", 0,
+			RK2928_CLKSEL_CON(25), 8, 7, DFLAGS,
+			RK2928_CLKGATE_CON(2), 10, GFLAGS),
+
+	COMPOSITE_NOMUX(SCLK_SDMMC, "sclk_sdmmc", "hclk_peri", 0,
+			RK2928_CLKSEL_CON(11), 0, 6, DFLAGS,
+			RK2928_CLKGATE_CON(2), 11, GFLAGS),
+	COMPOSITE_NOMUX(SCLK_SDIO, "sclk_sdio", "hclk_peri", 0,
+			RK2928_CLKSEL_CON(12), 0, 6, DFLAGS,
+			RK2928_CLKGATE_CON(2), 13, GFLAGS),
+	COMPOSITE_NOMUX(SCLK_EMMC, "sclk_emmc", "hclk_peri", 0,
+			RK2928_CLKSEL_CON(12), 8, 6, DFLAGS,
+			RK2928_CLKGATE_CON(2), 14, GFLAGS),
+
+	MUX(0, "uart_src", mux_pll_src_gpll_cpll_p, 0,
+			RK2928_CLKSEL_CON(12), 15, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart0_pre", "uart_src", 0,
+			RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(1), 8, GFLAGS),
+	COMPOSITE_FRAC(0, "uart0_frac", "uart0_pre", 0,
+			RK2928_CLKSEL_CON(17), 0,
+			RK2928_CLKGATE_CON(1), 9, GFLAGS),
+	MUX(SCLK_UART0, "sclk_uart0", mux_sclk_uart0_p, 0,
+			RK2928_CLKSEL_CON(13), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart1_pre", "uart_src", 0,
+			RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(1), 10, GFLAGS),
+	COMPOSITE_FRAC(0, "uart1_frac", "uart1_pre", 0,
+			RK2928_CLKSEL_CON(18), 0,
+			RK2928_CLKGATE_CON(1), 11, GFLAGS),
+	MUX(SCLK_UART1, "sclk_uart1", mux_sclk_uart1_p, 0,
+			RK2928_CLKSEL_CON(14), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart2_pre", "uart_src", 0,
+			RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(1), 12, GFLAGS),
+	COMPOSITE_FRAC(0, "uart2_frac", "uart2_pre", 0,
+			RK2928_CLKSEL_CON(19), 0,
+			RK2928_CLKGATE_CON(1), 13, GFLAGS),
+	MUX(SCLK_UART2, "sclk_uart2", mux_sclk_uart2_p, 0,
+			RK2928_CLKSEL_CON(15), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart3_pre", "uart_src", 0,
+			RK2928_CLKSEL_CON(16), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(1), 14, GFLAGS),
+	COMPOSITE_FRAC(0, "uart3_frac", "uart3_pre", 0,
+			RK2928_CLKSEL_CON(20), 0,
+			RK2928_CLKGATE_CON(1), 15, GFLAGS),
+	MUX(SCLK_UART3, "sclk_uart3", mux_sclk_uart3_p, 0,
+			RK2928_CLKSEL_CON(16), 8, 2, MFLAGS),
+
+	GATE(SCLK_JTAG, "jtag", "ext_jtag", 0, RK2928_CLKGATE_CON(1), 3, GFLAGS),
+
+	GATE(SCLK_TIMER0, "timer0", "xin24m", 0, RK2928_CLKGATE_CON(1), 0, GFLAGS),
+	GATE(SCLK_TIMER1, "timer1", "xin24m", 0, RK2928_CLKGATE_CON(1), 1, GFLAGS),
+
+	/* clk_core_pre gates */
+	GATE(0, "core_dbg", "armclk", 0, RK2928_CLKGATE_CON(9), 0, GFLAGS),
+
+	/* aclk_cpu gates */
+	GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS),
+	GATE(0, "aclk_intmem", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 12, GFLAGS),
+	GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 10, GFLAGS),
+
+	/* hclk_cpu gates */
+	GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
+	GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 1, GFLAGS),
+	GATE(0, "hclk_cpubus", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 8, GFLAGS),
+	/* hclk_ahb2apb is part of a clk branch */
+	GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+	GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
+	GATE(HCLK_LCDC1, "hclk_lcdc1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
+	GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
+	GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS),
+	GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
+
+	/* hclk_peri gates */
+	GATE(0, "hclk_peri_axi_matrix", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 0, GFLAGS),
+	GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 6, GFLAGS),
+	GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+	GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
+	GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
+	GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+	GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+	GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
+	GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
+	GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
+	GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 11, GFLAGS),
+	GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 12, GFLAGS),
+
+	/* aclk_lcdc0_pre gates */
+	GATE(0, "aclk_vio0", "aclk_lcdc0_pre", 0, RK2928_CLKGATE_CON(6), 13, GFLAGS),
+	GATE(ACLK_LCDC0, "aclk_lcdc0", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 0, GFLAGS),
+	GATE(ACLK_CIF0, "aclk_cif0", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 5, GFLAGS),
+	GATE(ACLK_IPP, "aclk_ipp", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 8, GFLAGS),
+
+	/* aclk_lcdc1_pre gates */
+	GATE(0, "aclk_vio1", "aclk_lcdc1_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
+	GATE(ACLK_LCDC1, "aclk_lcdc1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 3, GFLAGS),
+	GATE(ACLK_RGA, "aclk_rga", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 11, GFLAGS),
+
+	/* atclk_cpu gates */
+	GATE(0, "atclk", "atclk_cpu", 0, RK2928_CLKGATE_CON(9), 3, GFLAGS),
+	GATE(0, "trace", "atclk_cpu", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+
+	/* pclk_cpu gates */
+	GATE(PCLK_PWM01, "pclk_pwm01", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 10, GFLAGS),
+	GATE(PCLK_TIMER0, "pclk_timer0", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 7, GFLAGS),
+	GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 4, GFLAGS),
+	GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 5, GFLAGS),
+	GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
+	GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
+	GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+	GATE(PCLK_EFUSE, "pclk_efuse", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 2, GFLAGS),
+	GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 3, GFLAGS),
+	GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS),
+	GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
+	GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
+	GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 4, GFLAGS),
+	GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS),
+
+	/* aclk_peri */
+	GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
+	GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS),
+	GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 4, GFLAGS),
+	GATE(0, "aclk_cpu_peri", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 2, GFLAGS),
+	GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 3, GFLAGS),
+
+	/* pclk_peri gates */
+	GATE(0, "pclk_peri_axi_matrix", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 1, GFLAGS),
+	GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS),
+	GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
+	GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
+	GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 13, GFLAGS),
+	GATE(PCLK_UART2, "pclk_uart2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 2, GFLAGS),
+	GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 3, GFLAGS),
+	GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 6, GFLAGS),
+	GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS),
+	GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS),
+	GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS),
+	GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+};
+
+PNAME(mux_rk3066_lcdc0_p)	= { "dclk_lcdc0_src", "xin27m" };
+PNAME(mux_rk3066_lcdc1_p)	= { "dclk_lcdc1_src", "xin27m" };
+PNAME(mux_sclk_cif1_p)		= { "cif1_pre", "xin24m" };
+PNAME(mux_sclk_i2s1_p)		= { "i2s1_pre", "i2s1_frac", "xin12m" };
+PNAME(mux_sclk_i2s2_p)		= { "i2s2_pre", "i2s2_frac", "xin12m" };
+
+static struct clk_div_table div_aclk_cpu_t[] = {
+	{ .val = 0, .div = 1 },
+	{ .val = 1, .div = 2 },
+	{ .val = 2, .div = 3 },
+	{ .val = 3, .div = 4 },
+	{ .val = 4, .div = 8 },
+	{ /* sentinel */ },
+};
+
+static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
+	COMPOSITE_NOGATE(0, "armclk", mux_armclk_p, 0,
+			RK2928_CLKSEL_CON(0), 8, 1, MFLAGS, 0, 5, DFLAGS),
+	DIVTBL(0, "aclk_cpu_pre", "armclk", 0,
+			RK2928_CLKSEL_CON(1), 0, 3, DFLAGS, div_aclk_cpu_t),
+
+	GATE(CORE_L2C, "core_l2c", "aclk_cpu", 0,
+			RK2928_CLKGATE_CON(9), 4, GFLAGS),
+
+	COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0,
+			RK2928_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
+			RK2928_CLKGATE_CON(2), 0, GFLAGS),
+
+	COMPOSITE(0, "dclk_lcdc0_src", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS,
+			RK2928_CLKGATE_CON(3), 1, GFLAGS),
+	MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, 0,
+			RK2928_CLKSEL_CON(27), 4, 1, MFLAGS),
+	COMPOSITE(0, "dclk_lcdc1_src", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS,
+			RK2928_CLKGATE_CON(3), 2, GFLAGS),
+	MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, 0,
+			RK2928_CLKSEL_CON(28), 4, 1, MFLAGS),
+
+	COMPOSITE_NOMUX(0, "cif1_pre", "cif_src", 0,
+			RK2928_CLKSEL_CON(29), 8, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 8, GFLAGS),
+	MUX(SCLK_CIF1, "sclk_cif1", mux_sclk_cif1_p, 0,
+			RK2928_CLKSEL_CON(29), 15, 1, MFLAGS),
+
+	GATE(0, "pclkin_cif1", "ext_cif1", 0,
+			RK2928_CLKGATE_CON(3), 4, GFLAGS),
+
+	COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 13, GFLAGS),
+	GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_src", 0,
+			RK2928_CLKGATE_CON(5), 15, GFLAGS),
+
+	GATE(SCLK_TIMER2, "timer2", "xin24m", 0,
+			RK2928_CLKGATE_CON(3), 2, GFLAGS),
+
+	COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+			RK2928_CLKSEL_CON(34), 0, 16, DFLAGS,
+			RK2928_CLKGATE_CON(2), 15, GFLAGS),
+
+	MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
+			RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(2), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 7, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s0_frac", "i2s0_pre", 0,
+			RK2928_CLKSEL_CON(6), 0,
+			RK2928_CLKGATE_CON(0), 8, GFLAGS),
+	MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+			RK2928_CLKSEL_CON(2), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "i2s1_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 9, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s1_frac", "i2s1_pre", 0,
+			RK2928_CLKSEL_CON(7), 0,
+			RK2928_CLKGATE_CON(0), 10, GFLAGS),
+	MUX(SCLK_I2S1, "sclk_i2s1", mux_sclk_i2s1_p, 0,
+			RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "i2s2_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(4), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 11, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s2_frac", "i2s2_pre", 0,
+			RK2928_CLKSEL_CON(8), 0,
+			RK2928_CLKGATE_CON(0), 12, GFLAGS),
+	MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
+			RK2928_CLKSEL_CON(4), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 13, GFLAGS),
+	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pre", 0,
+			RK2928_CLKSEL_CON(9), 0,
+			RK2928_CLKGATE_CON(0), 14, GFLAGS),
+	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
+	GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+	GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+	GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
+	GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
+
+	GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+
+	GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
+
+	GATE(PCLK_TIMER1, "pclk_timer1", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 8, GFLAGS),
+	GATE(PCLK_TIMER2, "pclk_timer2", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
+	GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 15, GFLAGS),
+	GATE(PCLK_UART0, "pclk_uart0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 0, GFLAGS),
+	GATE(PCLK_UART1, "pclk_uart1", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
+
+	GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
+	GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 13, GFLAGS),
+};
+
+static struct clk_div_table div_rk3188_aclk_core_t[] = {
+	{ .val = 0, .div = 1 },
+	{ .val = 1, .div = 2 },
+	{ .val = 2, .div = 3 },
+	{ .val = 3, .div = 4 },
+	{ .val = 4, .div = 8 },
+	{ /* sentinel */ },
+};
+
+PNAME(mux_hsicphy_p)		= { "sclk_otgphy0", "sclk_otgphy1",
+				    "gpll", "cpll" };
+
+static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
+	COMPOSITE_NOGATE(0, "armclk", mux_armclk_p, 0,
+			RK2928_CLKSEL_CON(0), 8, 1, MFLAGS, 9, 5, DFLAGS),
+	COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", 0,
+			RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+			div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS),
+
+	/* do not source aclk_cpu_pre from the apll, to keep complexity down */
+	COMPOSITE_NOGATE(0, "aclk_cpu_pre", mux_aclk_cpu_p, CLK_SET_RATE_NO_REPARENT,
+			RK2928_CLKSEL_CON(0), 5, 1, MFLAGS, 0, 5, DFLAGS),
+
+	GATE(CORE_L2C, "core_l2c", "armclk", 0,
+			RK2928_CLKGATE_CON(9), 4, GFLAGS),
+
+	COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
+			RK2928_CLKGATE_CON(2), 0, GFLAGS),
+
+	COMPOSITE(DCLK_LCDC0, "dclk_lcdc0", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS,
+			RK2928_CLKGATE_CON(3), 1, GFLAGS),
+	COMPOSITE(DCLK_LCDC1, "dclk_lcdc1", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS,
+			RK2928_CLKGATE_CON(3), 2, GFLAGS),
+
+	COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
+			RK2928_CLKSEL_CON(34), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK2928_CLKGATE_CON(3), 15, GFLAGS),
+	GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_src", 0,
+			RK2928_CLKGATE_CON(9), 7, GFLAGS),
+
+	GATE(SCLK_TIMER2, "timer2", "xin24m", 0, RK2928_CLKGATE_CON(3), 4, GFLAGS),
+	GATE(SCLK_TIMER3, "timer3", "xin24m", 0, RK2928_CLKGATE_CON(1), 2, GFLAGS),
+	GATE(SCLK_TIMER4, "timer4", "xin24m", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
+	GATE(SCLK_TIMER5, "timer5", "xin24m", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS),
+	GATE(SCLK_TIMER6, "timer6", "xin24m", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS),
+
+	COMPOSITE_NODIV(0, "sclk_hsicphy_480m", mux_hsicphy_p, 0,
+			RK2928_CLKSEL_CON(30), 0, 2, MFLAGS,
+			RK2928_CLKGATE_CON(3), 6, GFLAGS),
+	DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0,
+			RK2928_CLKSEL_CON(11), 8, 6, DFLAGS),
+
+	MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
+			RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 9, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s0_frac", "i2s0_pre", 0,
+			RK2928_CLKSEL_CON(7), 0,
+			RK2928_CLKGATE_CON(0), 10, GFLAGS),
+	MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+			RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+			RK2928_CLKGATE_CON(0), 13, GFLAGS),
+	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pre", 0,
+			RK2928_CLKSEL_CON(9), 0,
+			RK2928_CLKGATE_CON(0), 14, GFLAGS),
+	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
+	GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
+	GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
+
+	GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+	GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+
+	GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
+
+	GATE(PCLK_UART0, "pclk_uart0", "hclk_ahb2apb", 0, RK2928_CLKGATE_CON(8), 0, GFLAGS),
+	GATE(PCLK_UART1, "pclk_uart1", "hclk_ahb2apb", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
+
+	GATE(ACLK_GPS, "aclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
+};
+
+static void __init rk3188_common_clk_init(struct device_node *np)
+{
+	void __iomem *reg_base;
+	struct clk *clk;
+
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("%s: could not map cru region\n", __func__);
+		return;
+	}
+
+	rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+
+	/* xin12m is created by a cru-internal divider */
+	clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock xin12m: %ld\n",
+			__func__, PTR_ERR(clk));
+
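+	/*
+	 * usb480m is supplied by the usb phys but modelled here as a
+	 * fixed 20 * 24 MHz = 480 MHz factor of xin24m.
+	 */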
+	clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock usb480m: %ld\n",
+			__func__, PTR_ERR(clk));
+
+	rockchip_clk_register_plls(rk3188_pll_clks,
+				   ARRAY_SIZE(rk3188_pll_clks),
+				   RK3188_GRF_SOC_STATUS);
+	rockchip_clk_register_branches(common_clk_branches,
+				  ARRAY_SIZE(common_clk_branches));
+
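+	/* the cru provides 9 softrst registers, all hiword-masked */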
+	rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
+				  ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
+
+static void __init rk3066a_clk_init(struct device_node *np)
+{
+	rk3188_common_clk_init(np);
+	rockchip_clk_register_branches(rk3066a_clk_branches,
+				  ARRAY_SIZE(rk3066a_clk_branches));
+}
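+/* hooked into of_clk_init() via the compatible string below */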
+CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
+
+static void __init rk3188a_clk_init(struct device_node *np)
+{
+	rk3188_common_clk_init(np);
+	rockchip_clk_register_branches(rk3188_clk_branches,
+				  ARRAY_SIZE(rk3188_clk_branches));
+}
+CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
+
+static void __init rk3188_clk_init(struct device_node *np)
+{
+	int i;
+
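+	/*
+	 * The plain rk3188 presumably does not use the pll bwadj field,
+	 * so clear it in all rate table entries before running the
+	 * common rk3188a init.
+	 */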
+	for (i = 0; i < ARRAY_SIZE(rk3188_pll_clks); i++) {
+		struct rockchip_pll_clock *pll = &rk3188_pll_clks[i];
+		struct rockchip_pll_rate_table *rate;
+
+		if (!pll->rate_table)
+			continue;
+
+		rate = pll->rate_table;
+		while (rate->rate > 0) {
+			rate->bwadj = 0;
+			rate++;
+		}
+	}
+
+	rk3188a_clk_init(np);
+}
+CLK_OF_DECLARE(rk3188_cru, "rockchip,rk3188-cru", rk3188_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
new file mode 100644
index 0000000..0d8c6c5
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/rk3288-cru.h>
+#include "clk.h"
+
+#define RK3288_GRF_SOC_CON(x)	(0x244 + x * 4)
+#define RK3288_GRF_SOC_STATUS	0x280
+
+enum rk3288_plls {
+	apll, dpll, cpll, gpll, npll,
+};
+
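+/*
+ * With the 24 MHz xin24m oscillator as pll input, RK3066_PLL_RATE(rate,
+ * nr, nf, no) appears to encode rate = 24 MHz * nf / (nr * no), e.g.
+ * 24 MHz * 92 / (1 * 1) = 2208 MHz for the first entry below.
+ */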
+struct rockchip_pll_rate_table rk3288_pll_rates[] = {
+	RK3066_PLL_RATE(2208000000, 1, 92, 1),
+	RK3066_PLL_RATE(2184000000, 1, 91, 1),
+	RK3066_PLL_RATE(2160000000, 1, 90, 1),
+	RK3066_PLL_RATE(2136000000, 1, 89, 1),
+	RK3066_PLL_RATE(2112000000, 1, 88, 1),
+	RK3066_PLL_RATE(2088000000, 1, 87, 1),
+	RK3066_PLL_RATE(2064000000, 1, 86, 1),
+	RK3066_PLL_RATE(2040000000, 1, 85, 1),
+	RK3066_PLL_RATE(2016000000, 1, 84, 1),
+	RK3066_PLL_RATE(1992000000, 1, 83, 1),
+	RK3066_PLL_RATE(1968000000, 1, 82, 1),
+	RK3066_PLL_RATE(1944000000, 1, 81, 1),
+	RK3066_PLL_RATE(1920000000, 1, 80, 1),
+	RK3066_PLL_RATE(1896000000, 1, 79, 1),
+	RK3066_PLL_RATE(1872000000, 1, 78, 1),
+	RK3066_PLL_RATE(1848000000, 1, 77, 1),
+	RK3066_PLL_RATE(1824000000, 1, 76, 1),
+	RK3066_PLL_RATE(1800000000, 1, 75, 1),
+	RK3066_PLL_RATE(1776000000, 1, 74, 1),
+	RK3066_PLL_RATE(1752000000, 1, 73, 1),
+	RK3066_PLL_RATE(1728000000, 1, 72, 1),
+	RK3066_PLL_RATE(1704000000, 1, 71, 1),
+	RK3066_PLL_RATE(1680000000, 1, 70, 1),
+	RK3066_PLL_RATE(1656000000, 1, 69, 1),
+	RK3066_PLL_RATE(1632000000, 1, 68, 1),
+	RK3066_PLL_RATE(1608000000, 1, 67, 1),
+	RK3066_PLL_RATE(1560000000, 1, 65, 1),
+	RK3066_PLL_RATE(1512000000, 1, 63, 1),
+	RK3066_PLL_RATE(1488000000, 1, 62, 1),
+	RK3066_PLL_RATE(1464000000, 1, 61, 1),
+	RK3066_PLL_RATE(1440000000, 1, 60, 1),
+	RK3066_PLL_RATE(1416000000, 1, 59, 1),
+	RK3066_PLL_RATE(1392000000, 1, 58, 1),
+	RK3066_PLL_RATE(1368000000, 1, 57, 1),
+	RK3066_PLL_RATE(1344000000, 1, 56, 1),
+	RK3066_PLL_RATE(1320000000, 1, 55, 1),
+	RK3066_PLL_RATE(1296000000, 1, 54, 1),
+	RK3066_PLL_RATE(1272000000, 1, 53, 1),
+	RK3066_PLL_RATE(1248000000, 1, 52, 1),
+	RK3066_PLL_RATE(1224000000, 1, 51, 1),
+	RK3066_PLL_RATE(1200000000, 1, 50, 1),
+	RK3066_PLL_RATE(1188000000, 2, 99, 1),
+	RK3066_PLL_RATE(1176000000, 1, 49, 1),
+	RK3066_PLL_RATE(1128000000, 1, 47, 1),
+	RK3066_PLL_RATE(1104000000, 1, 46, 1),
+	RK3066_PLL_RATE(1008000000, 1, 84, 2),
+	RK3066_PLL_RATE( 912000000, 1, 76, 2),
+	RK3066_PLL_RATE( 891000000, 8, 594, 2),
+	RK3066_PLL_RATE( 888000000, 1, 74, 2),
+	RK3066_PLL_RATE( 816000000, 1, 68, 2),
+	RK3066_PLL_RATE( 798000000, 2, 133, 2),
+	RK3066_PLL_RATE( 792000000, 1, 66, 2),
+	RK3066_PLL_RATE( 768000000, 1, 64, 2),
+	RK3066_PLL_RATE( 742500000, 8, 495, 2),
+	RK3066_PLL_RATE( 696000000, 1, 58, 2),
+	RK3066_PLL_RATE( 600000000, 1, 50, 2),
+	RK3066_PLL_RATE( 594000000, 2, 198, 4),
+	RK3066_PLL_RATE( 552000000, 1, 46, 2),
+	RK3066_PLL_RATE( 504000000, 1, 84, 4),
+	RK3066_PLL_RATE( 456000000, 1, 76, 4),
+	RK3066_PLL_RATE( 408000000, 1, 68, 4),
+	RK3066_PLL_RATE( 384000000, 2, 128, 4),
+	RK3066_PLL_RATE( 360000000, 1, 60, 4),
+	RK3066_PLL_RATE( 312000000, 1, 52, 4),
+	RK3066_PLL_RATE( 300000000, 1, 50, 4),
+	RK3066_PLL_RATE( 297000000, 2, 198, 8),
+	RK3066_PLL_RATE( 252000000, 1, 84, 8),
+	RK3066_PLL_RATE( 216000000, 1, 72, 8),
+	RK3066_PLL_RATE( 148500000, 2, 99, 8),
+	RK3066_PLL_RATE( 126000000, 1, 84, 16),
+	RK3066_PLL_RATE(  48000000, 1, 64, 32),
+	{ /* sentinel */ },
+};
+
+PNAME(mux_pll_p)		= { "xin24m", "xin32k" };
+PNAME(mux_armclk_p)		= { "apll_core", "gpll_core" };
+PNAME(mux_ddrphy_p)		= { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_aclk_cpu_src_p)	= { "cpll_aclk_cpu", "gpll_aclk_cpu" };
+
+PNAME(mux_pll_src_cpll_gpll_p)		= { "cpll", "gpll" };
+PNAME(mux_pll_src_npll_cpll_gpll_p)	= { "npll", "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_npll_p)	= { "cpll", "gpll", "npll" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p)	= { "cpll", "gpll", "usb480m" };
+
+PNAME(mux_mmc_src_p)	= { "cpll", "gpll", "xin24m", "xin24m" };
+PNAME(mux_i2s_pre_p)	= { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
+PNAME(mux_i2s_clkout_p)	= { "i2s_pre", "xin12m" };
+PNAME(mux_spdif_p)	= { "spdif_pre", "spdif_frac", "xin12m" };
+PNAME(mux_spdif_8ch_p)	= { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" };
+PNAME(mux_uart0_pll_p)	= { "cpll", "gpll", "usbphy_480m_src", "npll" };
+PNAME(mux_uart0_p)	= { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p)	= { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p)	= { "uart2_src", "uart2_frac", "xin24m" };
+PNAME(mux_uart3_p)	= { "uart3_src", "uart3_frac", "xin24m" };
+PNAME(mux_uart4_p)	= { "uart4_src", "uart4_frac", "xin24m" };
+PNAME(mux_cif_out_p)	= { "cif_src", "xin24m" };
+PNAME(mux_macref_p)	= { "mac_src", "ext_gmac" };
+PNAME(mux_hsadcout_p)	= { "hsadc_src", "ext_hsadc" };
+PNAME(mux_edp_24m_p)	= { "ext_edp_24m", "xin24m" };
+PNAME(mux_tspout_p)	= { "cpll", "gpll", "npll", "xin27m" };
+
+PNAME(mux_usbphy480m_p)		= { "sclk_otgphy0", "sclk_otgphy1",
+				    "sclk_otgphy2" };
+PNAME(mux_hsicphy480m_p)	= { "cpll", "gpll", "usbphy480m_src" };
+PNAME(mux_hsicphy12m_p)		= { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
+
+static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = {
+	[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0),
+		     RK3288_MODE_CON, 0, 6, rk3288_pll_rates),
+	[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4),
+		     RK3288_MODE_CON, 4, 5, NULL),
+	[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8),
+		     RK3288_MODE_CON, 8, 7, rk3288_pll_rates),
+	[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12),
+		     RK3288_MODE_CON, 12, 8, rk3288_pll_rates),
+	[npll] = PLL(pll_rk3066, PLL_NPLL, "npll",  mux_pll_p, 0, RK3288_PLL_CON(16),
+		     RK3288_MODE_CON, 14, 9, NULL),
+};
+
+static struct clk_div_table div_hclk_cpu_t[] = {
+	{ .val = 0, .div = 1 },
+	{ .val = 1, .div = 2 },
+	{ .val = 3, .div = 4 },
+	{ /* sentinel */ },
+};
+
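+/*
+ * The CRU control registers pair every bit field with a write-enable
+ * mask in the upper 16 bits, so the hiword-mask variants of the mux,
+ * divider and gate ops can update a field with a single write instead
+ * of a read-modify-write cycle.
+ */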
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+	/*
+	 * Clock-Architecture Diagram 1
+	 */
+
+	GATE(0, "apll_core", "apll", 0,
+			RK3288_CLKGATE_CON(0), 1, GFLAGS),
+	GATE(0, "gpll_core", "gpll", 0,
+			RK3288_CLKGATE_CON(0), 2, GFLAGS),
+	COMPOSITE_NOGATE(0, "armclk", mux_armclk_p, 0,
+			RK3288_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 5, DFLAGS),
+
+	COMPOSITE_NOMUX(0, "armcore0", "armclk", 0,
+			RK3288_CLKSEL_CON(36), 0, 3, DFLAGS,
+			RK3288_CLKGATE_CON(12), 0, GFLAGS),
+	COMPOSITE_NOMUX(0, "armcore1", "armclk", 0,
+			RK3288_CLKSEL_CON(36), 4, 3, DFLAGS,
+			RK3288_CLKGATE_CON(12), 1, GFLAGS),
+	COMPOSITE_NOMUX(0, "armcore2", "armclk", 0,
+			RK3288_CLKSEL_CON(36), 8, 3, DFLAGS,
+			RK3288_CLKGATE_CON(12), 2, GFLAGS),
+	COMPOSITE_NOMUX(0, "armcore3", "armclk", 0,
+			RK3288_CLKSEL_CON(36), 12, 3, DFLAGS,
+			RK3288_CLKGATE_CON(12), 3, GFLAGS),
+	COMPOSITE_NOMUX(0, "l2ram", "armclk", 0,
+			RK3288_CLKSEL_CON(37), 0, 3, DFLAGS,
+			RK3288_CLKGATE_CON(12), 4, GFLAGS),
+	COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", 0,
+			RK3288_CLKSEL_CON(0), 0, 4, DFLAGS,
+			RK3288_CLKGATE_CON(12), 5, GFLAGS),
+	COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", 0,
+			RK3288_CLKSEL_CON(0), 4, 4, DFLAGS,
+			RK3288_CLKGATE_CON(12), 6, GFLAGS),
+	COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
+			RK3288_CLKSEL_CON(37), 4, 5, DFLAGS,
+			RK3288_CLKGATE_CON(12), 7, GFLAGS),
+	COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", 0,
+			RK3288_CLKSEL_CON(37), 9, 5, DFLAGS,
+			RK3288_CLKGATE_CON(12), 8, GFLAGS),
+	GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
+			RK3288_CLKGATE_CON(12), 9, GFLAGS),
+	GATE(0, "cs_dbg", "pclk_dbg_pre", 0,
+			RK3288_CLKGATE_CON(12), 10, GFLAGS),
+	GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0,
+			RK3288_CLKGATE_CON(12), 11, GFLAGS),
+
+	GATE(0, "dpll_ddr", "dpll", 0,
+			RK3288_CLKGATE_CON(0), 8, GFLAGS),
+	GATE(0, "gpll_ddr", "gpll", 0,
+			RK3288_CLKGATE_CON(0), 9, GFLAGS),
+	COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, 0,
+			RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2,
+					DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+
+	GATE(0, "gpll_aclk_cpu", "gpll", 0,
+			RK3288_CLKGATE_CON(0), 10, GFLAGS),
+	GATE(0, "cpll_aclk_cpu", "cpll", 0,
+			RK3288_CLKGATE_CON(0), 11, GFLAGS),
+	COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
+			RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS),
+	DIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0,
+			RK3288_CLKSEL_CON(1), 0, 3, DFLAGS),
+	GATE(0, "aclk_cpu", "aclk_cpu_pre", 0,
+			RK3288_CLKGATE_CON(0), 3, GFLAGS),
+	COMPOSITE_NOMUX(0, "pclk_cpu", "aclk_cpu_pre", 0,
+			RK3288_CLKSEL_CON(1), 12, 3, DFLAGS,
+			RK3288_CLKGATE_CON(0), 5, GFLAGS),
+	COMPOSITE_NOMUX_DIVTBL(0, "hclk_cpu", "aclk_cpu_pre", 0,
+			RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t,
+			RK3288_CLKGATE_CON(0), 4, GFLAGS),
+	GATE(0, "c2c_host", "aclk_cpu_src", 0,
+			RK3288_CLKGATE_CON(13), 8, GFLAGS),
+	COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0,
+			RK3288_CLKSEL_CON(26), 6, 2, DFLAGS,
+			RK3288_CLKGATE_CON(5), 4, GFLAGS),
+	GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0,
+			RK3288_CLKGATE_CON(0), 7, GFLAGS),
+
+	COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(4), 15, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(4), 1, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s_frac", "i2s_src", 0,
+			RK3288_CLKSEL_CON(8), 0,
+			RK3288_CLKGATE_CON(4), 2, GFLAGS),
+	MUX(0, "i2s_pre", mux_i2s_pre_p, 0,
+			RK3288_CLKSEL_CON(4), 8, 2, MFLAGS),
+	COMPOSITE_NODIV(0, "i2s0_clkout", mux_i2s_clkout_p, 0,
+			RK3288_CLKSEL_CON(4), 12, 1, MFLAGS,
+			RK3288_CLKGATE_CON(4), 0, GFLAGS),
+	GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", 0,
+			RK3288_CLKGATE_CON(4), 3, GFLAGS),
+
+	MUX(0, "spdif_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(5), 15, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "spdif_pre", "spdif_src", 0,
+			RK3288_CLKSEL_CON(5), 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(4), 4, GFLAGS),
+	COMPOSITE_FRAC(0, "spdif_frac", "spdif_src", 0,
+			RK3288_CLKSEL_CON(9), 0,
+			RK3288_CLKGATE_CON(4), 5, GFLAGS),
+	COMPOSITE_NODIV(SCLK_SPDIF, "sclk_spdif", mux_spdif_p, 0,
+			RK3288_CLKSEL_CON(5), 8, 2, MFLAGS,
+			RK3288_CLKGATE_CON(4), 6, GFLAGS),
+	COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0,
+			RK3288_CLKSEL_CON(40), 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(4), 7, GFLAGS),
+	COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_pre", 0,
+			RK3288_CLKSEL_CON(41), 0,
+			RK3288_CLKGATE_CON(4), 8, GFLAGS),
+	COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+			RK3288_CLKSEL_CON(40), 8, 2, MFLAGS,
+			RK3288_CLKGATE_CON(4), 9, GFLAGS),
+
+	GATE(0, "sclk_acc_efuse", "xin24m", 0,
+			RK3288_CLKGATE_CON(0), 12, GFLAGS),
+
+	GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+			RK3288_CLKGATE_CON(1), 0, GFLAGS),
+	GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0,
+			RK3288_CLKGATE_CON(1), 1, GFLAGS),
+	GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0,
+			RK3288_CLKGATE_CON(1), 2, GFLAGS),
+	GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0,
+			RK3288_CLKGATE_CON(1), 3, GFLAGS),
+	GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0,
+			RK3288_CLKGATE_CON(1), 4, GFLAGS),
+	GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
+			RK3288_CLKGATE_CON(1), 5, GFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 2
+	 */
+
+	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(32), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(3), 9, GFLAGS),
+	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(3), 11, GFLAGS),
+
+	COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(3), 0, GFLAGS),
+	DIV(0, "hclk_vio", "aclk_vio0", 0,
+			RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
+	COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(3), 2, GFLAGS),
+
+	COMPOSITE(0, "aclk_rga_pre", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(30), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(3), 5, GFLAGS),
+	COMPOSITE(0, "sclk_rga", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(30), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(3), 4, GFLAGS),
+
+	COMPOSITE(DCLK_VOP0, "dclk_vop0", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(27), 0, 2, MFLAGS, 8, 8, DFLAGS,
+			RK3288_CLKGATE_CON(3), 1, GFLAGS),
+	COMPOSITE(DCLK_VOP1, "dclk_vop1", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(29), 6, 2, MFLAGS, 8, 8, DFLAGS,
+			RK3288_CLKGATE_CON(3), 3, GFLAGS),
+
+	COMPOSITE_NODIV(0, "sclk_edp_24m", mux_edp_24m_p, 0,
+			RK3288_CLKSEL_CON(28), 15, 1, MFLAGS,
+			RK3288_CLKGATE_CON(3), 12, GFLAGS),
+	COMPOSITE(0, "sclk_edp", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3288_CLKGATE_CON(3), 13, GFLAGS),
+
+	COMPOSITE(0, "sclk_isp", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(6), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3288_CLKGATE_CON(3), 14, GFLAGS),
+	COMPOSITE(0, "sclk_isp_jpe", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(6), 14, 2, MFLAGS, 8, 6, DFLAGS,
+			RK3288_CLKGATE_CON(3), 15, GFLAGS),
+
+	GATE(0, "sclk_hdmi_hdcp", "xin24m", 0,
+			RK3288_CLKGATE_CON(5), 12, GFLAGS),
+	GATE(0, "sclk_hdmi_cec", "xin32k", 0,
+			RK3288_CLKGATE_CON(5), 11, GFLAGS),
+
+	COMPOSITE(0, "aclk_hevc", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(39), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(13), 13, GFLAGS),
+	DIV(0, "hclk_hevc", "aclk_hevc", 0,
+			RK3288_CLKSEL_CON(40), 12, 2, DFLAGS),
+
+	COMPOSITE(0, "sclk_hevc_cabac", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(13), 14, GFLAGS),
+	COMPOSITE(0, "sclk_hevc_core", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(42), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(13), 15, GFLAGS),
+
+	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(26), 8, 1, MFLAGS,
+			RK3288_CLKGATE_CON(3), 7, GFLAGS),
+	COMPOSITE_NOGATE(0, "sclk_vip_out", mux_cif_out_p, 0,
+			RK3288_CLKSEL_CON(26), 15, 1, MFLAGS, 9, 5, DFLAGS),
+
+	DIV(0, "pclk_pd_alive", "gpll", 0,
+			RK3288_CLKSEL_CON(33), 8, 5, DFLAGS),
+	COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", 0,
+			RK3288_CLKSEL_CON(33), 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(5), 8, GFLAGS),
+
+	COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+			RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(5), 7, GFLAGS),
+
+	COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(2), 0, GFLAGS),
+	COMPOSITE_NOMUX(0, "pclk_peri", "aclk_peri_src", 0,
+			RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK3288_CLKGATE_CON(2), 3, GFLAGS),
+	COMPOSITE_NOMUX(0, "hclk_peri", "aclk_peri_src", 0,
+			RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK3288_CLKGATE_CON(2), 2, GFLAGS),
+	GATE(0, "aclk_peri", "aclk_peri_src", 0,
+			RK3288_CLKGATE_CON(2), 1, GFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 3
+	 */
+
+	COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(25), 7, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(2), 9, GFLAGS),
+	COMPOSITE(SCLK_SPI1, "sclk_spi1", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(25), 15, 1, MFLAGS, 8, 7, DFLAGS,
+			RK3288_CLKGATE_CON(2), 10, GFLAGS),
+	COMPOSITE(SCLK_SPI2, "sclk_spi2", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(39), 7, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(2), 11, GFLAGS),
+
+	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+			RK3288_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3288_CLKGATE_CON(13), 0, GFLAGS),
+	COMPOSITE(SCLK_SDIO0, "sclk_sdio0", mux_mmc_src_p, 0,
+			RK3288_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3288_CLKGATE_CON(13), 1, GFLAGS),
+	COMPOSITE(SCLK_SDIO1, "sclk_sdio1", mux_mmc_src_p, 0,
+			RK3288_CLKSEL_CON(34), 14, 2, MFLAGS, 8, 6, DFLAGS,
+			RK3288_CLKGATE_CON(13), 2, GFLAGS),
+	COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0,
+			RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
+			RK3288_CLKGATE_CON(13), 3, GFLAGS),
+
+	COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0,
+			RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(4), 11, GFLAGS),
+	COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(4), 10, GFLAGS),
+
+	GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+			RK3288_CLKGATE_CON(13), 4, GFLAGS),
+	GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+			RK3288_CLKGATE_CON(13), 5, GFLAGS),
+	GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", 0,
+			RK3288_CLKGATE_CON(13), 6, GFLAGS),
+	GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 0,
+			RK3288_CLKGATE_CON(13), 7, GFLAGS),
+
+	COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
+			RK3288_CLKSEL_CON(2), 0, 6, DFLAGS,
+			RK3288_CLKGATE_CON(2), 7, GFLAGS),
+
+	COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+			RK3288_CLKSEL_CON(24), 8, 8, DFLAGS,
+			RK3288_CLKGATE_CON(2), 8, GFLAGS),
+
+	GATE(SCLK_PS2C, "sclk_ps2c", "xin24m", 0,
+			RK3288_CLKGATE_CON(5), 13, GFLAGS),
+
+	COMPOSITE(SCLK_NANDC0, "sclk_nandc0", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(38), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK3288_CLKGATE_CON(5), 5, GFLAGS),
+	COMPOSITE(SCLK_NANDC1, "sclk_nandc1", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(5), 6, GFLAGS),
+
+	COMPOSITE(0, "uart0_src", mux_uart0_pll_p, 0,
+			RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(1), 8, GFLAGS),
+	COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+			RK3288_CLKSEL_CON(17), 0,
+			RK3288_CLKGATE_CON(1), 9, GFLAGS),
+	MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+			RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
+	MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
+			RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(1), 10, GFLAGS),
+	COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+			RK3288_CLKSEL_CON(18), 0,
+			RK3288_CLKGATE_CON(1), 11, GFLAGS),
+	MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+			RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
+			RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(1), 12, GFLAGS),
+	COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+			RK3288_CLKSEL_CON(19), 0,
+			RK3288_CLKGATE_CON(1), 13, GFLAGS),
+	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+			RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
+			RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(1), 14, GFLAGS),
+	COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+			RK3288_CLKSEL_CON(20), 0,
+			RK3288_CLKGATE_CON(1), 15, GFLAGS),
+	MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+			RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
+			RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
+			RK3288_CLKGATE_CON(2), 12, GFLAGS),
+	COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+			RK3288_CLKSEL_CON(7), 0,
+			RK3288_CLKGATE_CON(2), 13, GFLAGS),
+	MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+			RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
+
+	COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3288_CLKGATE_CON(2), 5, GFLAGS),
+	MUX(0, "macref", mux_macref_p, 0,
+			RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
+	GATE(0, "sclk_macref_out", "macref", 0,
+			RK3288_CLKGATE_CON(5), 3, GFLAGS),
+	GATE(SCLK_MACREF, "sclk_macref", "macref", 0,
+			RK3288_CLKGATE_CON(5), 2, GFLAGS),
+	GATE(SCLK_MAC_RX, "sclk_mac_rx", "macref", 0,
+			RK3288_CLKGATE_CON(5), 0, GFLAGS),
+	GATE(SCLK_MAC_TX, "sclk_mac_tx", "macref", 0,
+			RK3288_CLKGATE_CON(5), 1, GFLAGS),
+
+	COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3288_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
+			RK3288_CLKGATE_CON(2), 6, GFLAGS),
+	MUX(SCLK_HSADC, "sclk_hsadc_out", mux_hsadcout_p, 0,
+			RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+
+	GATE(0, "jtag", "ext_jtag", 0,
+			RK3288_CLKGATE_CON(4), 14, GFLAGS),
+
+	COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+			RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
+			RK3288_CLKGATE_CON(5), 15, GFLAGS),
+	COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
+			RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
+			RK3288_CLKGATE_CON(3), 6, GFLAGS),
+	GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
+			RK3288_CLKGATE_CON(13), 9, GFLAGS),
+	DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
+			RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
+	MUX(SCLK_HSICPHY12M, "sclk_hsicphy12m", mux_hsicphy12m_p, 0,
+			RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 4
+	 */
+
+	/* aclk_cpu gates */
+	GATE(0, "sclk_intmem0", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 5, GFLAGS),
+	GATE(0, "sclk_intmem1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 6, GFLAGS),
+	GATE(0, "sclk_intmem2", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 7, GFLAGS),
+	GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS),
+	GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 13, GFLAGS),
+	GATE(0, "aclk_intmem", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 4, GFLAGS),
+	GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS),
+	GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS),
+
+	/* hclk_cpu gates */
+	GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS),
+	GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS),
+	GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 9, GFLAGS),
+	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS),
+	GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS),
+
+	/* pclk_cpu gates */
+	GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS),
+	GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
+	GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
+	GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
+	GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
+	GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
+	GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
+	GATE(0, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
+	GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS),
+	GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
+	GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
+	GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
+	GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+
+	/* ddrctrl [DDR Controller PHY clock] gates */
+	GATE(0, "nclk_ddrupctl0", "ddrphy", 0, RK3288_CLKGATE_CON(11), 4, GFLAGS),
+	GATE(0, "nclk_ddrupctl1", "ddrphy", 0, RK3288_CLKGATE_CON(11), 5, GFLAGS),
+
+	/* ddrphy gates */
+	GATE(0, "sclk_ddrphy0", "ddrphy", 0, RK3288_CLKGATE_CON(4), 12, GFLAGS),
+	GATE(0, "sclk_ddrphy1", "ddrphy", 0, RK3288_CLKGATE_CON(4), 13, GFLAGS),
+
+	/* aclk_peri gates */
+	GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 2, GFLAGS),
+	GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS),
+	GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS),
+	GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 12, GFLAGS),
+	GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS),
+	GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS),
+
+	/* hclk_peri gates */
+	GATE(0, "hclk_peri_matrix", "hclk_peri", 0, RK3288_CLKGATE_CON(6), 0, GFLAGS),
+	GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 4, GFLAGS),
+	GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS),
+	GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 7, GFLAGS),
+	GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS),
+	GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 9, GFLAGS),
+	GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 10, GFLAGS),
+	GATE(0, "hclk_emem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 12, GFLAGS),
+	GATE(0, "hclk_mem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 13, GFLAGS),
+	GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS),
+	GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS),
+	GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS),
+	GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 3, GFLAGS),
+	GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 4, GFLAGS),
+	GATE(HCLK_SDIO1, "hclk_sdio1", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 5, GFLAGS),
+	GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 6, GFLAGS),
+	GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 7, GFLAGS),
+	GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS),
+
+	/* pclk_peri gates */
+	GATE(0, "pclk_peri_matrix", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 1, GFLAGS),
+	GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS),
+	GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS),
+	GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS),
+	GATE(PCLK_PS2C, "pclk_ps2c", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 7, GFLAGS),
+	GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 8, GFLAGS),
+	GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 9, GFLAGS),
+	GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS),
+	GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS),
+	GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS),
+	GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
+	GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS),
+	GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS),
+	GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS),
+	GATE(PCLK_SIM, "pclk_sim", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 3, GFLAGS),
+	GATE(PCLK_I2C5, "pclk_i2c5", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 0, GFLAGS),
+	GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK3288_CLKGATE_CON(8), 1, GFLAGS),
+
+	GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
+	GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
+	GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+	GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+	GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
+
+	/* sclk_gpu gates */
+	GATE(ACLK_GPU, "aclk_gpu", "sclk_gpu", 0, RK3288_CLKGATE_CON(18), 0, GFLAGS),
+
+	/* pclk_pd_alive gates */
+	GATE(PCLK_GPIO8, "pclk_gpio8", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 8, GFLAGS),
+	GATE(PCLK_GPIO7, "pclk_gpio7", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 7, GFLAGS),
+	GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 1, GFLAGS),
+	GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 2, GFLAGS),
+	GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 3, GFLAGS),
+	GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS),
+	GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS),
+	GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS),
+	GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 11, GFLAGS),
+	GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS),
+
+	/* pclk_pd_pmu gates */
+	GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 0, GFLAGS),
+	GATE(0, "pclk_intmem1", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 1, GFLAGS),
+	GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS),
+	GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 3, GFLAGS),
+	GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS),
+
+	/* hclk_vio gates */
+	GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS),
+	GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS),
+	GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS),
+	GATE(0, "hclk_vio_ahb_arbi", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 9, GFLAGS),
+	GATE(0, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS),
+	GATE(0, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS),
+	GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS),
+	GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS),
+	GATE(0, "hclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 10, GFLAGS),
+	GATE(0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS),
+	GATE(0, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS),
+	GATE(0, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS),
+	GATE(0, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS),
+	GATE(0, "pclk_edp_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 8, GFLAGS),
+	GATE(0, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS),
+	GATE(0, "pclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 11, GFLAGS),
+
+	/* aclk_vio0 gates */
+	GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS),
+	GATE(0, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS),
+	GATE(0, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS),
+	GATE(0, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS),
+
+	/* aclk_vio1 gates */
+	GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS),
+	GATE(0, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS),
+	GATE(0, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS),
+
+	/* aclk_rga_pre gates */
+	GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS),
+	GATE(0, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS),
+
+	/*
+	 * Other ungrouped clocks.
+	 */
+
+	GATE(0, "pclk_vip_in", "ext_vip", 0, RK3288_CLKGATE_CON(16), 0, GFLAGS),
+	GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
+};
+
+static void __init rk3288_clk_init(struct device_node *np)
+{
+	void __iomem *reg_base;
+	struct clk *clk;
+
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("%s: could not map cru region\n", __func__);
+		return;
+	}
+
+	rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+
+	/* xin12m is created by a cru-internal divider */
+	clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock xin12m: %ld\n",
+			__func__, PTR_ERR(clk));
+
+	clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock usb480m: %ld\n",
+			__func__, PTR_ERR(clk));
+
+	rockchip_clk_register_plls(rk3288_pll_clks,
+				   ARRAY_SIZE(rk3288_pll_clks),
+				   RK3288_GRF_SOC_STATUS);
+	rockchip_clk_register_branches(rk3288_clk_branches,
+				  ARRAY_SIZE(rk3288_clk_branches));
+
+	rockchip_register_softrst(np, 9, reg_base + RK3288_SOFTRST_CON(0),
+				  ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
+CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
new file mode 100644
index 0000000..278cf9d
--- /dev/null
+++ b/drivers/clk/rockchip/clk.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on
+ *
+ * samsung/clk.c
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include "clk.h"
+
+/**
+ * rockchip_clk_register_branch - register a clock branch
+ *
+ * Most clock branches have a form like
+ *
+ * src1 --|--\
+ *        |M |--[GATE]-[DIV]-
+ * src2 --|--/
+ *
+ * sometimes without one of those components.
+ */
+struct clk *rockchip_clk_register_branch(const char *name,
+		const char **parent_names, u8 num_parents, void __iomem *base,
+		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
+		u8 div_shift, u8 div_width, u8 div_flags,
+		struct clk_div_table *div_table, int gate_offset,
+		u8 gate_shift, u8 gate_flags, unsigned long flags,
+		spinlock_t *lock)
+{
+	struct clk *clk;
+	struct clk_mux *mux = NULL;
+	struct clk_gate *gate = NULL;
+	struct clk_divider *div = NULL;
+	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
+			     *gate_ops = NULL;
+
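+	/* the mux and the divider share one register, the gate has its own */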
+	if (num_parents > 1) {
+		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+		if (!mux)
+			return ERR_PTR(-ENOMEM);
+
+		mux->reg = base + muxdiv_offset;
+		mux->shift = mux_shift;
+		mux->mask = BIT(mux_width) - 1;
+		mux->flags = mux_flags;
+		mux->lock = lock;
+		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+							: &clk_mux_ops;
+	}
+
+	if (gate_offset >= 0) {
+		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+		if (!gate) {
+			kfree(mux);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		gate->flags = gate_flags;
+		gate->reg = base + gate_offset;
+		gate->bit_idx = gate_shift;
+		gate->lock = lock;
+		gate_ops = &clk_gate_ops;
+	}
+
+	if (div_width > 0) {
+		div = kzalloc(sizeof(*div), GFP_KERNEL);
+		if (!div) {
+			kfree(mux);
+			kfree(gate);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		div->flags = div_flags;
+		div->reg = base + muxdiv_offset;
+		div->shift = div_shift;
+		div->width = div_width;
+		div->lock = lock;
+		div->table = div_table;
+		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+						? &clk_divider_ro_ops
+						: &clk_divider_ops;
+	}
+
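+	/* combine whichever of mux, divider and gate exist into one clock */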
+	clk = clk_register_composite(NULL, name, parent_names, num_parents,
+				     mux ? &mux->hw : NULL, mux_ops,
+				     div ? &div->hw : NULL, div_ops,
+				     gate ? &gate->hw : NULL, gate_ops,
+				     flags);
+
+	return clk;
+}
+
+static DEFINE_SPINLOCK(clk_lock);
+static struct clk **clk_table;
+static void __iomem *reg_base;
+static struct clk_onecell_data clk_data;
+static struct device_node *cru_node;
+static struct regmap *grf;
+
+void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
+			      unsigned long nr_clks)
+{
+	reg_base = base;
+	cru_node = np;
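+	/* the grf regmap is looked up lazily in rockchip_clk_get_grf() */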
+	grf = ERR_PTR(-EPROBE_DEFER);
+
+	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_table)
+		pr_err("%s: could not allocate clock lookup table\n", __func__);
+
+	clk_data.clks = clk_table;
+	clk_data.clk_num = nr_clks;
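+	/* expose all clocks through a single onecell provider */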
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+struct regmap *rockchip_clk_get_grf(void)
+{
+	if (IS_ERR(grf))
+		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
+	return grf;
+}
+
+void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
+{
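+	/* clocks with id 0 have no dt binding id and are not exported */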
+	if (clk_table && id)
+		clk_table[id] = clk;
+}
+
+void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
+				unsigned int nr_pll, int grf_lock_offset)
+{
+	struct clk *clk;
+	int idx;
+
+	for (idx = 0; idx < nr_pll; idx++, list++) {
+		clk = rockchip_clk_register_pll(list->type, list->name,
+				list->parent_names, list->num_parents,
+				reg_base, list->con_offset, grf_lock_offset,
+				list->lock_shift, list->mode_offset,
+				list->mode_shift, list->rate_table, &clk_lock);
+		if (IS_ERR(clk)) {
+			pr_err("%s: failed to register clock %s\n", __func__,
+				list->name);
+			continue;
+		}
+
+		rockchip_clk_add_lookup(clk, list->id);
+	}
+}
+
+void __init rockchip_clk_register_branches(
+				      struct rockchip_clk_branch *list,
+				      unsigned int nr_clk)
+{
+	struct clk *clk = NULL;
+	unsigned int idx;
+	unsigned long flags;
+
+	for (idx = 0; idx < nr_clk; idx++, list++) {
+		flags = list->flags;
+
+		/* catch simple muxes */
+		switch (list->branch_type) {
+		case branch_mux:
+			clk = clk_register_mux(NULL, list->name,
+				list->parent_names, list->num_parents,
+				flags, reg_base + list->muxdiv_offset,
+				list->mux_shift, list->mux_width,
+				list->mux_flags, &clk_lock);
+			break;
+		case branch_divider:
+			if (list->div_table)
+				clk = clk_register_divider_table(NULL,
+					list->name, list->parent_names[0],
+					flags, reg_base + list->muxdiv_offset,
+					list->div_shift, list->div_width,
+					list->div_flags, list->div_table,
+					&clk_lock);
+			else
+				clk = clk_register_divider(NULL, list->name,
+					list->parent_names[0], flags,
+					reg_base + list->muxdiv_offset,
+					list->div_shift, list->div_width,
+					list->div_flags, &clk_lock);
+			break;
+		case branch_fraction_divider:
+			/* fractional dividers are not implemented yet */
+			continue;
+		case branch_gate:
+			flags |= CLK_SET_RATE_PARENT;
+
+			/* keep all gates untouched for now */
+			flags |= CLK_IGNORE_UNUSED;
+
+			clk = clk_register_gate(NULL, list->name,
+				list->parent_names[0], flags,
+				reg_base + list->gate_offset,
+				list->gate_shift, list->gate_flags, &clk_lock);
+			break;
+		case branch_composite:
+			/* keep all gates untouched for now */
+			flags |= CLK_IGNORE_UNUSED;
+
+			clk = rockchip_clk_register_branch(list->name,
+				list->parent_names, list->num_parents,
+				reg_base, list->muxdiv_offset, list->mux_shift,
+				list->mux_width, list->mux_flags,
+				list->div_shift, list->div_width,
+				list->div_flags, list->div_table,
+				list->gate_offset, list->gate_shift,
+				list->gate_flags, flags, &clk_lock);
+			break;
+		}
+
+		/* none of the cases above matched */
+		if (!clk) {
+			pr_err("%s: unknown clock type %d\n",
+			       __func__, list->branch_type);
+			continue;
+		}
+
+		if (IS_ERR(clk)) {
+			pr_err("%s: failed to register clock %s: %ld\n",
+			       __func__, list->name, PTR_ERR(clk));
+			continue;
+		}
+
+		rockchip_clk_add_lookup(clk, list->id);
+	}
+}
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
new file mode 100644
index 0000000..887cbde
--- /dev/null
+++ b/drivers/clk/rockchip/clk.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on
+ *
+ * samsung/clk.h
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CLK_ROCKCHIP_CLK_H
+#define CLK_ROCKCHIP_CLK_H
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
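+/*
+ * Rockchip CRU registers use hiword-mask semantics: the upper 16 bits of a
+ * register act as a write-enable mask for the lower 16 bits, so a field can
+ * be updated without a read-modify-write cycle. HIWORD_UPDATE() builds such
+ * a value.
+ */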
+#define HIWORD_UPDATE(val, mask, shift) \
+		((val) << (shift) | (mask) << ((shift) + 16))
+
+/* register positions shared by RK2928, RK3066 and RK3188 */
+#define RK2928_PLL_CON(x)		(x * 0x4)
+#define RK2928_MODE_CON		0x40
+#define RK2928_CLKSEL_CON(x)	(x * 0x4 + 0x44)
+#define RK2928_CLKGATE_CON(x)	(x * 0x4 + 0xd0)
+#define RK2928_GLB_SRST_FST		0x100
+#define RK2928_GLB_SRST_SND		0x104
+#define RK2928_SOFTRST_CON(x)	(x * 0x4 + 0x110)
+#define RK2928_MISC_CON		0x134
+
+#define RK3288_PLL_CON(x)		RK2928_PLL_CON(x)
+#define RK3288_MODE_CON			0x50
+#define RK3288_CLKSEL_CON(x)		(x * 0x4 + 0x60)
+#define RK3288_CLKGATE_CON(x)		(x * 0x4 + 0x160)
+#define RK3288_GLB_SRST_FST		0x1b0
+#define RK3288_GLB_SRST_SND		0x1b4
+#define RK3288_SOFTRST_CON(x)		(x * 0x4 + 0x1b8)
+#define RK3288_MISC_CON			0x1e8
+
+enum rockchip_pll_type {
+	pll_rk3066,
+};
+
+#define RK3066_PLL_RATE(_rate, _nr, _nf, _no)	\
+{						\
+	.rate	= _rate##U,			\
+	.nr = _nr,				\
+	.nf = _nf,				\
+	.no = _no,				\
+	.bwadj = (_nf >> 1),			\
+}
+
+struct rockchip_pll_rate_table {
+	unsigned long rate;
+	unsigned int nr;
+	unsigned int nf;
+	unsigned int no;
+	unsigned int bwadj;
+};
+
+/**
+ * struct rockchip_pll_clock - information about pll clock
+ * @id: platform specific id of the clock.
+ * @name: name of this pll clock.
+ * @parent_names: names of the parent clocks.
+ * @num_parents: number of parent clocks.
+ * @flags: optional flags for basic clock.
+ * @con_offset: offset of the register for configuring the PLL.
+ * @mode_offset: offset of the register for configuring the PLL-mode.
+ * @mode_shift: offset inside the mode-register for the mode of this pll.
+ * @lock_shift: offset inside the lock register for the lock status.
+ * @type: Type of PLL to be registered.
+ * @rate_table: Table of usable pll rates
+ */
+struct rockchip_pll_clock {
+	unsigned int		id;
+	const char		*name;
+	const char		**parent_names;
+	u8			num_parents;
+	unsigned long		flags;
+	int			con_offset;
+	int			mode_offset;
+	int			mode_shift;
+	int			lock_shift;
+	enum rockchip_pll_type	type;
+	struct rockchip_pll_rate_table *rate_table;
+};
+
+#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift,	\
+		_lshift, _rtable)					\
+	{								\
+		.id		= _id,					\
+		.type		= _type,				\
+		.name		= _name,				\
+		.parent_names	= _pnames,				\
+		.num_parents	= ARRAY_SIZE(_pnames),			\
+		.flags		= CLK_GET_RATE_NOCACHE | _flags,	\
+		.con_offset	= _con,					\
+		.mode_offset	= _mode,				\
+		.mode_shift	= _mshift,				\
+		.lock_shift	= _lshift,				\
+		.rate_table	= _rtable,				\
+	}
+
+struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
+		const char *name, const char **parent_names, u8 num_parents,
+		void __iomem *base, int con_offset, int grf_lock_offset,
+		int lock_shift, int reg_mode, int mode_shift,
+		struct rockchip_pll_rate_table *rate_table,
+		spinlock_t *lock);
+
+#define PNAME(x) static const char *x[] __initconst
+
+enum rockchip_clk_branch_type {
+	branch_composite,
+	branch_mux,
+	branch_divider,
+	branch_fraction_divider,
+	branch_gate,
+};
+
+struct rockchip_clk_branch {
+	unsigned int			id;
+	enum rockchip_clk_branch_type	branch_type;
+	const char			*name;
+	const char			**parent_names;
+	u8				num_parents;
+	unsigned long			flags;
+	int				muxdiv_offset;
+	u8				mux_shift;
+	u8				mux_width;
+	u8				mux_flags;
+	u8				div_shift;
+	u8				div_width;
+	u8				div_flags;
+	struct clk_div_table		*div_table;
+	int				gate_offset;
+	u8				gate_shift;
+	u8				gate_flags;
+};
+
+#define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
+		  df, go, gs, gf)				\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_composite,		\
+		.name		= cname,			\
+		.parent_names	= pnames,			\
+		.num_parents	= ARRAY_SIZE(pnames),		\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.mux_shift	= ms,				\
+		.mux_width	= mw,				\
+		.mux_flags	= mf,				\
+		.div_shift	= ds,				\
+		.div_width	= dw,				\
+		.div_flags	= df,				\
+		.gate_offset	= go,				\
+		.gate_shift	= gs,				\
+		.gate_flags	= gf,				\
+	}
+
+#define COMPOSITE_NOMUX(_id, cname, pname, f, mo, ds, dw, df,	\
+			go, gs, gf)				\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_composite,		\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.div_shift	= ds,				\
+		.div_width	= dw,				\
+		.div_flags	= df,				\
+		.gate_offset	= go,				\
+		.gate_shift	= gs,				\
+		.gate_flags	= gf,				\
+	}
+
+#define COMPOSITE_NOMUX_DIVTBL(_id, cname, pname, f, mo, ds, dw,\
+			       df, dt, go, gs, gf)		\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_composite,		\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.div_shift	= ds,				\
+		.div_width	= dw,				\
+		.div_flags	= df,				\
+		.div_table	= dt,				\
+		.gate_offset	= go,				\
+		.gate_shift	= gs,				\
+		.gate_flags	= gf,				\
+	}
+
+#define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf,	\
+			go, gs, gf)				\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_composite,		\
+		.name		= cname,			\
+		.parent_names	= pnames,			\
+		.num_parents	= ARRAY_SIZE(pnames),		\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.mux_shift	= ms,				\
+		.mux_width	= mw,				\
+		.mux_flags	= mf,				\
+		.gate_offset	= go,				\
+		.gate_shift	= gs,				\
+		.gate_flags	= gf,				\
+	}
+
+#define COMPOSITE_NOGATE(_id, cname, pnames, f, mo, ms, mw, mf,	\
+			 ds, dw, df)				\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_composite,		\
+		.name		= cname,			\
+		.parent_names	= pnames,			\
+		.num_parents	= ARRAY_SIZE(pnames),		\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.mux_shift	= ms,				\
+		.mux_width	= mw,				\
+		.mux_flags	= mf,				\
+		.div_shift	= ds,				\
+		.div_width	= dw,				\
+		.div_flags	= df,				\
+		.gate_offset	= -1,				\
+	}
+
+#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_fraction_divider,	\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.div_shift	= 16,				\
+		.div_width	= 16,				\
+		.div_flags	= df,				\
+		.gate_offset	= go,				\
+		.gate_shift	= gs,				\
+		.gate_flags	= gf,				\
+	}
+
+#define MUX(_id, cname, pnames, f, o, s, w, mf)			\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_mux,			\
+		.name		= cname,			\
+		.parent_names	= pnames,			\
+		.num_parents	= ARRAY_SIZE(pnames),		\
+		.flags		= f,				\
+		.muxdiv_offset	= o,				\
+		.mux_shift	= s,				\
+		.mux_width	= w,				\
+		.mux_flags	= mf,				\
+		.gate_offset	= -1,				\
+	}
+
+#define DIV(_id, cname, pname, f, o, s, w, df)			\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_divider,		\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.flags		= f,				\
+		.muxdiv_offset	= o,				\
+		.div_shift	= s,				\
+		.div_width	= w,				\
+		.div_flags	= df,				\
+		.gate_offset	= -1,				\
+	}
+
+#define DIVTBL(_id, cname, pname, f, o, s, w, df, dt)		\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_divider,		\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.flags		= f,				\
+		.muxdiv_offset	= o,				\
+		.div_shift	= s,				\
+		.div_width	= w,				\
+		.div_flags	= df,				\
+		.div_table	= dt,				\
+	}
+
+#define GATE(_id, cname, pname, f, o, b, gf)			\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_gate,			\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.flags		= f,				\
+		.gate_offset	= o,				\
+		.gate_shift	= b,				\
+		.gate_flags	= gf,				\
+	}
+
+
+void rockchip_clk_init(struct device_node *np, void __iomem *base,
+		       unsigned long nr_clks);
+struct regmap *rockchip_clk_get_grf(void);
+void rockchip_clk_add_lookup(struct clk *clk, unsigned int id);
+void rockchip_clk_register_branches(struct rockchip_clk_branch *clk_list,
+				    unsigned int nr_clk);
+void rockchip_clk_register_plls(struct rockchip_pll_clock *pll_list,
+				unsigned int nr_pll, int grf_lock_offset);
+
+#define ROCKCHIP_SOFTRST_HIWORD_MASK	BIT(0)
+
+#ifdef CONFIG_RESET_CONTROLLER
+void rockchip_register_softrst(struct device_node *np,
+			       unsigned int num_regs,
+			       void __iomem *base, u8 flags);
+#else
+static inline void rockchip_register_softrst(struct device_node *np,
+			       unsigned int num_regs,
+			       void __iomem *base, u8 flags)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
new file mode 100644
index 0000000..552f7bb
--- /dev/null
+++ b/drivers/clk/rockchip/softrst.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include "clk.h"
+
+struct rockchip_softrst {
+	struct reset_controller_dev	rcdev;
+	void __iomem			*reg_base;
+	int				num_regs;
+	int				num_per_reg;
+	u8				flags;
+	spinlock_t			lock;
+};
+
+static int rockchip_softrst_assert(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	struct rockchip_softrst *softrst = container_of(rcdev,
+						     struct rockchip_softrst,
+						     rcdev);
+	int bank = id / softrst->num_per_reg;
+	int offset = id % softrst->num_per_reg;
+
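+	/*
+	 * Hiword-mask registers select the affected bits via the upper
+	 * halfword, so they can be written without locking; plain registers
+	 * need a read-modify-write under the spinlock.
+	 */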
+	if (softrst->flags & ROCKCHIP_SOFTRST_HIWORD_MASK) {
+		writel(BIT(offset) | (BIT(offset) << 16),
+		       softrst->reg_base + (bank * 4));
+	} else {
+		unsigned long flags;
+		u32 reg;
+
+		spin_lock_irqsave(&softrst->lock, flags);
+
+		reg = readl(softrst->reg_base + (bank * 4));
+		writel(reg | BIT(offset), softrst->reg_base + (bank * 4));
+
+		spin_unlock_irqrestore(&softrst->lock, flags);
+	}
+
+	return 0;
+}
+
+static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	struct rockchip_softrst *softrst = container_of(rcdev,
+						     struct rockchip_softrst,
+						     rcdev);
+	int bank = id / softrst->num_per_reg;
+	int offset = id % softrst->num_per_reg;
+
+	if (softrst->flags & ROCKCHIP_SOFTRST_HIWORD_MASK) {
+		writel((BIT(offset) << 16), softrst->reg_base + (bank * 4));
+	} else {
+		unsigned long flags;
+		u32 reg;
+
+		spin_lock_irqsave(&softrst->lock, flags);
+
+		reg = readl(softrst->reg_base + (bank * 4));
+		writel(reg & ~BIT(offset), softrst->reg_base + (bank * 4));
+
+		spin_unlock_irqrestore(&softrst->lock, flags);
+	}
+
+	return 0;
+}
+
+static struct reset_control_ops rockchip_softrst_ops = {
+	.assert		= rockchip_softrst_assert,
+	.deassert	= rockchip_softrst_deassert,
+};
+
+void __init rockchip_register_softrst(struct device_node *np,
+				      unsigned int num_regs,
+				      void __iomem *base, u8 flags)
+{
+	struct rockchip_softrst *softrst;
+	int ret;
+
+	softrst = kzalloc(sizeof(*softrst), GFP_KERNEL);
+	if (!softrst)
+		return;
+
+	spin_lock_init(&softrst->lock);
+
+	softrst->reg_base = base;
+	softrst->flags = flags;
+	softrst->num_regs = num_regs;
+	softrst->num_per_reg = (flags & ROCKCHIP_SOFTRST_HIWORD_MASK) ? 16
+								      : 32;
+
+	softrst->rcdev.owner = THIS_MODULE;
+	softrst->rcdev.nr_resets = num_regs * softrst->num_per_reg;
+	softrst->rcdev.ops = &rockchip_softrst_ops;
+	softrst->rcdev.of_node = np;
+	ret = reset_controller_register(&softrst->rcdev);
+	if (ret) {
+		pr_err("%s: could not register reset controller, %d\n",
+		       __func__, ret);
+		kfree(softrst);
+	}
+}
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 69e8177..2949a55 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_SOC_EXYNOS5420)	+= clk-exynos5420.o
 obj-$(CONFIG_SOC_EXYNOS5440)	+= clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)	+= clk-exynos-audss.o
+obj-$(CONFIG_ARCH_EXYNOS)	+= clk-exynos-clkout.o
 obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
 obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
 obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
new file mode 100644
index 0000000..3a7cb25
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Clock driver for Exynos clock output
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#define EXYNOS_CLKOUT_NR_CLKS		1
+#define EXYNOS_CLKOUT_PARENTS		32
+
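+/* the clkout mux and gate are both controlled via the PMU's DEBUG register */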
+#define EXYNOS_PMU_DEBUG_REG		0xa00
+#define EXYNOS_CLKOUT_DISABLE_SHIFT	0
+#define EXYNOS_CLKOUT_MUX_SHIFT		8
+#define EXYNOS4_CLKOUT_MUX_MASK		0xf
+#define EXYNOS5_CLKOUT_MUX_MASK		0x1f
+
+struct exynos_clkout {
+	struct clk_gate gate;
+	struct clk_mux mux;
+	spinlock_t slock;
+	struct clk_onecell_data data;
+	struct clk *clk_table[EXYNOS_CLKOUT_NR_CLKS];
+	void __iomem *reg;
+	u32 pmu_debug_save;
+};
+
+static struct exynos_clkout *clkout;
+
+static int exynos_clkout_suspend(void)
+{
+	clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+	return 0;
+}
+
+static void exynos_clkout_resume(void)
+{
+	writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
+}
+
+static struct syscore_ops exynos_clkout_syscore_ops = {
+	.suspend = exynos_clkout_suspend,
+	.resume = exynos_clkout_resume,
+};
+
+static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
+{
+	const char *parent_names[EXYNOS_CLKOUT_PARENTS];
+	struct clk *parents[EXYNOS_CLKOUT_PARENTS];
+	int parent_count;
+	int ret;
+	int i;
+
+	clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
+	if (!clkout)
+		return;
+
+	spin_lock_init(&clkout->slock);
+
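+	/* probe all possible "clkoutN" parents; missing ones become "none" */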
+	parent_count = 0;
+	for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i) {
+		char name[] = "clkoutXX";
+
+		snprintf(name, sizeof(name), "clkout%d", i);
+		parents[i] = of_clk_get_by_name(node, name);
+		if (IS_ERR(parents[i])) {
+			parent_names[i] = "none";
+			continue;
+		}
+
+		parent_names[i] = __clk_get_name(parents[i]);
+		parent_count = i + 1;
+	}
+
+	if (!parent_count)
+		goto free_clkout;
+
+	clkout->reg = of_iomap(node, 0);
+	if (!clkout->reg)
+		goto clks_put;
+
+	clkout->gate.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
+	clkout->gate.bit_idx = EXYNOS_CLKOUT_DISABLE_SHIFT;
+	clkout->gate.flags = CLK_GATE_SET_TO_DISABLE;
+	clkout->gate.lock = &clkout->slock;
+
+	clkout->mux.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
+	clkout->mux.mask = mux_mask;
+	clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT;
+	clkout->mux.lock = &clkout->slock;
+
+	clkout->clk_table[0] = clk_register_composite(NULL, "clkout",
+				parent_names, parent_count, &clkout->mux.hw,
+				&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
+				&clk_gate_ops, CLK_SET_RATE_PARENT
+				| CLK_SET_RATE_NO_REPARENT);
+	if (IS_ERR(clkout->clk_table[0]))
+		goto err_unmap;
+
+	clkout->data.clks = clkout->clk_table;
+	clkout->data.clk_num = EXYNOS_CLKOUT_NR_CLKS;
+	ret = of_clk_add_provider(node, of_clk_src_onecell_get, &clkout->data);
+	if (ret)
+		goto err_clk_unreg;
+
+	register_syscore_ops(&exynos_clkout_syscore_ops);
+
+	return;
+
+err_clk_unreg:
+	clk_unregister(clkout->clk_table[0]);
+err_unmap:
+	iounmap(clkout->reg);
+clks_put:
+	for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i)
+		if (!IS_ERR(parents[i]))
+			clk_put(parents[i]);
+free_clkout:
+	kfree(clkout);
+
+	pr_err("%s: failed to register clkout clock\n", __func__);
+}
+
+static void __init exynos4_clkout_init(struct device_node *node)
+{
+	exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
+}
+CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+		exynos4_clkout_init);
+CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+		exynos4_clkout_init);
+CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+		exynos4_clkout_init);
+
+static void __init exynos5_clkout_init(struct device_node *node)
+{
+	exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
+}
+CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+		exynos5_clkout_init);
+CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+		exynos5_clkout_init);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 7a17bd4..dc85f8e 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -87,6 +87,22 @@
 #define SRC_CPU			0x14200
 #define DIV_CPU0		0x14500
 #define DIV_CPU1		0x14504
+#define PWR_CTRL1		0x15020
+#define PWR_CTRL2		0x15024
+
+/* Below definitions are used for PWR_CTRL settings */
+#define PWR_CTRL1_CORE2_DOWN_RATIO(x)		(((x) & 0x7) << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO(x)		(((x) & 0x7) << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN			(1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN			(1 << 8)
+#define PWR_CTRL1_USE_CORE3_WFE			(1 << 7)
+#define PWR_CTRL1_USE_CORE2_WFE			(1 << 6)
+#define PWR_CTRL1_USE_CORE1_WFE			(1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE			(1 << 4)
+#define PWR_CTRL1_USE_CORE3_WFI			(1 << 3)
+#define PWR_CTRL1_USE_CORE2_WFI			(1 << 2)
+#define PWR_CTRL1_USE_CORE1_WFI			(1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI			(1 << 0)
 
 /* list of PLLs to be registered */
 enum exynos3250_plls {
@@ -168,6 +184,8 @@
 	SRC_CPU,
 	DIV_CPU0,
 	DIV_CPU1,
+	PWR_CTRL1,
+	PWR_CTRL2,
 };
 
 static int exynos3250_clk_suspend(void)
@@ -748,6 +766,27 @@
 			UPLL_LOCK, UPLL_CON0, NULL),
 };
 
+static void __init exynos3_core_down_clock(void)
+{
+	unsigned int tmp;
+
+	/*
+	 * Enable arm clock down (in idle) and set arm divider
+	 * ratios in WFI/WFE state.
+	 */
+	tmp = (PWR_CTRL1_CORE2_DOWN_RATIO(7) | PWR_CTRL1_CORE1_DOWN_RATIO(7) |
+		PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
+		PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
+		PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
+	__raw_writel(tmp, reg_base + PWR_CTRL1);
+
+	/*
+	 * Disable the clock up feature, in case it was enabled by the
+	 * bootloader.
+	 */
+	__raw_writel(0x0, reg_base + PWR_CTRL2);
+}
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
 	struct samsung_clk_provider *ctx;
@@ -775,6 +814,10 @@
 	samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
 	samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
 
+	exynos3_core_down_clock();
+
 	exynos3250_clk_sleep_init();
+
+	samsung_clk_of_add_provider(np, ctx);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 7f4a473..ac163d7 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -25,10 +25,12 @@
 #define DIV_LEFTBUS		0x4500
 #define GATE_IP_LEFTBUS		0x4800
 #define E4X12_GATE_IP_IMAGE	0x4930
+#define CLKOUT_CMU_LEFTBUS	0x4a00
 #define SRC_RIGHTBUS		0x8200
 #define DIV_RIGHTBUS		0x8500
 #define GATE_IP_RIGHTBUS	0x8800
 #define E4X12_GATE_IP_PERIR	0x8960
+#define CLKOUT_CMU_RIGHTBUS	0x8a00
 #define EPLL_LOCK		0xc010
 #define VPLL_LOCK		0xc020
 #define EPLL_CON0		0xc110
@@ -98,6 +100,7 @@
 #define GATE_IP_PERIL		0xc950
 #define E4210_GATE_IP_PERIR	0xc960
 #define GATE_BLOCK		0xc970
+#define CLKOUT_CMU_TOP		0xca00
 #define E4X12_MPLL_LOCK		0x10008
 #define E4X12_MPLL_CON0		0x10108
 #define SRC_DMC			0x10200
@@ -105,6 +108,7 @@
 #define DIV_DMC0		0x10500
 #define DIV_DMC1		0x10504
 #define GATE_IP_DMC		0x10900
+#define CLKOUT_CMU_DMC		0x10a00
 #define APLL_LOCK		0x14000
 #define E4210_MPLL_LOCK		0x14008
 #define APLL_CON0		0x14100
@@ -114,11 +118,28 @@
 #define DIV_CPU1		0x14504
 #define GATE_SCLK_CPU		0x14800
 #define GATE_IP_CPU		0x14900
+#define CLKOUT_CMU_CPU		0x14a00
+#define PWR_CTRL1		0x15020
+#define E4X12_PWR_CTRL2		0x15024
 #define E4X12_DIV_ISP0		0x18300
 #define E4X12_DIV_ISP1		0x18304
 #define E4X12_GATE_ISP0		0x18800
 #define E4X12_GATE_ISP1		0x18804
 
+/* Below definitions are used for PWR_CTRL settings */
+#define PWR_CTRL1_CORE2_DOWN_RATIO(x)		(((x) & 0x7) << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO(x)		(((x) & 0x7) << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN			(1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN			(1 << 8)
+#define PWR_CTRL1_USE_CORE3_WFE			(1 << 7)
+#define PWR_CTRL1_USE_CORE2_WFE			(1 << 6)
+#define PWR_CTRL1_USE_CORE1_WFE			(1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE			(1 << 4)
+#define PWR_CTRL1_USE_CORE3_WFI			(1 << 3)
+#define PWR_CTRL1_USE_CORE2_WFI			(1 << 2)
+#define PWR_CTRL1_USE_CORE1_WFI			(1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI			(1 << 0)
+
 /* the exynos4 soc type */
 enum exynos4_soc {
 	EXYNOS4210,
@@ -155,6 +176,7 @@
 	E4210_GATE_IP_LCD1,
 	E4210_GATE_IP_PERIR,
 	E4210_MPLL_CON0,
+	PWR_CTRL1,
 };
 
 static unsigned long exynos4x12_clk_save[] __initdata = {
@@ -164,6 +186,8 @@
 	E4X12_DIV_ISP,
 	E4X12_DIV_CAM1,
 	E4X12_MPLL_CON0,
+	PWR_CTRL1,
+	E4X12_PWR_CTRL2,
 };
 
 static unsigned long exynos4_clk_pll_regs[] __initdata = {
@@ -242,6 +266,11 @@
 	DIV_CPU1,
 	GATE_SCLK_CPU,
 	GATE_IP_CPU,
+	CLKOUT_CMU_LEFTBUS,
+	CLKOUT_CMU_RIGHTBUS,
+	CLKOUT_CMU_TOP,
+	CLKOUT_CMU_DMC,
+	CLKOUT_CMU_CPU,
 };
 
 static const struct samsung_clk_reg_dump src_mask_suspend[] = {
@@ -397,10 +426,32 @@
 				"sclk_epll", "sclk_vpll", };
 PNAME(mout_mixer_p4210)	= { "sclk_dac", "sclk_hdmi", };
 PNAME(mout_dac_p4210)	= { "sclk_vpll", "sclk_hdmiphy", };
+PNAME(mout_pwi_p4210) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
+				"sclk_usbphy1", "sclk_hdmiphy", "none",
+				"sclk_epll", "sclk_vpll" };
+PNAME(clkout_left_p4210) = { "sclk_mpll_div_2", "sclk_apll_div_2",
+				"div_gdl", "div_gpl" };
+PNAME(clkout_right_p4210) = { "sclk_mpll_div_2", "sclk_apll_div_2",
+				"div_gdr", "div_gpr" };
+PNAME(clkout_top_p4210) = { "fout_epll", "fout_vpll", "sclk_hdmi24m",
+				"sclk_usbphy0", "sclk_usbphy1", "sclk_hdmiphy",
+				"cdclk0", "cdclk1", "cdclk2", "spdif_extclk",
+				"aclk160", "aclk133", "aclk200", "aclk100",
+				"sclk_mfc", "sclk_g3d", "sclk_g2d",
+				"cam_a_pclk", "cam_b_pclk", "s_rxbyteclkhs0_2l",
+				"s_rxbyteclkhs0_4l" };
+PNAME(clkout_dmc_p4210) = { "div_dmcd", "div_dmcp", "div_acp_pclk", "div_dmc",
+				"div_dphy", "none", "div_pwi" };
+PNAME(clkout_cpu_p4210) = { "fout_apll_div_2", "none", "fout_mpll_div_2",
+				"none", "arm_clk_div_2", "div_corem0",
+				"div_corem1", "div_corem0", "div_atb",
+				"div_periph", "div_pclk_dbg", "div_hpm" };
 
 /* Exynos 4x12-specific parent groups */
 PNAME(mout_mpll_user_p4x12) = { "fin_pll", "sclk_mpll", };
 PNAME(mout_core_p4x12)	= { "mout_apll", "mout_mpll_user_c", };
+PNAME(mout_gdl_p4x12)	= { "mout_mpll_user_l", "sclk_apll", };
+PNAME(mout_gdr_p4x12)	= { "mout_mpll_user_r", "sclk_apll", };
 PNAME(sclk_ampll_p4x12)	= { "mout_mpll_user_t", "sclk_apll", };
 PNAME(group1_p4x12)	= { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
 				"none",	"sclk_hdmiphy", "mout_mpll_user_t",
@@ -418,6 +469,32 @@
 PNAME(mout_user_aclk400_mcuisp_p4x12) = {"fin_pll", "div_aclk400_mcuisp", };
 PNAME(mout_user_aclk200_p4x12) = {"fin_pll", "div_aclk200", };
 PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
+PNAME(mout_pwi_p4x12) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
+				"none", "sclk_hdmiphy", "sclk_mpll",
+				"sclk_epll", "sclk_vpll" };
+PNAME(clkout_left_p4x12) = { "sclk_mpll_user_l_div_2", "sclk_apll_div_2",
+				"div_gdl", "div_gpl" };
+PNAME(clkout_right_p4x12) = { "sclk_mpll_user_r_div_2", "sclk_apll_div_2",
+				"div_gdr", "div_gpr" };
+PNAME(clkout_top_p4x12) = { "fout_epll", "fout_vpll", "sclk_hdmi24m",
+				"sclk_usbphy0", "none", "sclk_hdmiphy",
+				"cdclk0", "cdclk1", "cdclk2", "spdif_extclk",
+				"aclk160", "aclk133", "aclk200", "aclk100",
+				"sclk_mfc", "sclk_g3d", "aclk400_mcuisp",
+				"cam_a_pclk", "cam_b_pclk", "s_rxbyteclkhs0_2l",
+				"s_rxbyteclkhs0_4l", "rx_half_byte_clk_csis0",
+				"rx_half_byte_clk_csis1", "div_jpeg",
+				"sclk_pwm_isp", "sclk_spi0_isp",
+				"sclk_spi1_isp", "sclk_uart_isp",
+				"sclk_mipihsi", "sclk_hdmi", "sclk_fimd0",
+				"sclk_pcm0" };
+PNAME(clkout_dmc_p4x12) = { "div_dmcd", "div_dmcp", "aclk_acp", "div_acp_pclk",
+				"div_dmc", "div_dphy", "fout_mpll_div_2",
+				"div_pwi", "none", "div_c2c", "div_c2c_aclk" };
+PNAME(clkout_cpu_p4x12) = { "fout_apll_div_2", "none", "none", "none",
+				"arm_clk_div_2", "div_corem0", "div_corem1",
+				"div_cores", "div_atb", "div_periph",
+				"div_pclk_dbg", "div_hpm" };
 
 /* fixed rate clocks generated outside the soc */
 static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
@@ -436,6 +513,24 @@
 	FRATE(0, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
 };
 
+static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata = {
+	FFACTOR(0, "sclk_apll_div_2", "sclk_apll", 1, 2, 0),
+	FFACTOR(0, "fout_mpll_div_2", "fout_mpll", 1, 2, 0),
+	FFACTOR(0, "fout_apll_div_2", "fout_apll", 1, 2, 0),
+	FFACTOR(0, "arm_clk_div_2", "arm_clk", 1, 2, 0),
+};
+
+static struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initdata = {
+	FFACTOR(0, "sclk_mpll_div_2", "sclk_mpll", 1, 2, 0),
+};
+
+static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initdata = {
+	FFACTOR(0, "sclk_mpll_user_l_div_2", "mout_mpll_user_l", 1, 2, 0),
+	FFACTOR(0, "sclk_mpll_user_r_div_2", "mout_mpll_user_r", 1, 2, 0),
+	FFACTOR(0, "sclk_mpll_user_t_div_2", "mout_mpll_user_t", 1, 2, 0),
+	FFACTOR(0, "sclk_mpll_user_c_div_2", "mout_mpll_user_c", 1, 2, 0),
+};
+
 /* list of mux clocks supported in all exynos4 soc's */
 static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
 	MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
@@ -451,6 +546,9 @@
 	MUX(0, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
 	MUX(CLK_SCLK_EPLL, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
 	MUX(0, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
+
+	MUX(0, "mout_dmc_bus", sclk_ampll_p4210, SRC_DMC, 4, 1),
+	MUX(0, "mout_dphy", sclk_ampll_p4210, SRC_DMC, 8, 1),
 };
 
 /* list of mux clocks supported in exynos4210 soc */
@@ -459,6 +557,14 @@
 };
 
 static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+	MUX(0, "mout_gdl", sclk_ampll_p4210, SRC_LEFTBUS, 0, 1),
+	MUX(0, "mout_clkout_leftbus", clkout_left_p4210,
+			CLKOUT_CMU_LEFTBUS, 0, 5),
+
+	MUX(0, "mout_gdr", sclk_ampll_p4210, SRC_RIGHTBUS, 0, 1),
+	MUX(0, "mout_clkout_rightbus", clkout_right_p4210,
+			CLKOUT_CMU_RIGHTBUS, 0, 5),
+
 	MUX(0, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
 	MUX(0, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
 	MUX(0, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
@@ -472,6 +578,7 @@
 	MUX(0, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
 	MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
 	MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+	MUX(0, "mout_hpm", mout_core_p4210, SRC_CPU, 20, 1),
 	MUX(CLK_SCLK_VPLL, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
 	MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
 	MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
@@ -503,12 +610,30 @@
 	MUX(0, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
 	MUX(0, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
 	MUX(0, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
+	MUX(0, "mout_clkout_top", clkout_top_p4210, CLKOUT_CMU_TOP, 0, 5),
+
+	MUX(0, "mout_pwi", mout_pwi_p4210, SRC_DMC, 16, 4),
+	MUX(0, "mout_clkout_dmc", clkout_dmc_p4210, CLKOUT_CMU_DMC, 0, 5),
+
+	MUX(0, "mout_clkout_cpu", clkout_cpu_p4210, CLKOUT_CMU_CPU, 0, 5),
 };
 
 /* list of mux clocks supported in exynos4x12 soc */
 static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+	MUX(0, "mout_mpll_user_l", mout_mpll_p, SRC_LEFTBUS, 4, 1),
+	MUX(0, "mout_gdl", mout_gdl_p4x12, SRC_LEFTBUS, 0, 1),
+	MUX(0, "mout_clkout_leftbus", clkout_left_p4x12,
+			CLKOUT_CMU_LEFTBUS, 0, 5),
+
+	MUX(0, "mout_mpll_user_r", mout_mpll_p, SRC_RIGHTBUS, 4, 1),
+	MUX(0, "mout_gdr", mout_gdr_p4x12, SRC_RIGHTBUS, 0, 1),
+	MUX(0, "mout_clkout_rightbus", clkout_right_p4x12,
+			CLKOUT_CMU_RIGHTBUS, 0, 5),
+
 	MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p4x12,
 			SRC_CPU, 24, 1),
+	MUX(0, "mout_clkout_cpu", clkout_cpu_p4x12, CLKOUT_CMU_CPU, 0, 5),
+
 	MUX(0, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
 	MUX(0, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
 	MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p4x12,
@@ -531,6 +656,7 @@
 	MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
 	MUX(CLK_SCLK_VPLL, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
 	MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
+	MUX(0, "mout_hpm", mout_core_p4x12, SRC_CPU, 20, 1),
 	MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
 	MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
 	MUX(CLK_MOUT_FIMC2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
@@ -565,15 +691,39 @@
 	MUX(0, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
 	MUX(0, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
 	MUX(0, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
+	MUX(0, "mout_clkout_top", clkout_top_p4x12, CLKOUT_CMU_TOP, 0, 5),
+
+	MUX(0, "mout_c2c", sclk_ampll_p4210, SRC_DMC, 0, 1),
+	MUX(0, "mout_pwi", mout_pwi_p4x12, SRC_DMC, 16, 4),
 	MUX(0, "mout_g2d0", sclk_ampll_p4210, SRC_DMC, 20, 1),
 	MUX(0, "mout_g2d1", sclk_evpll_p, SRC_DMC, 24, 1),
 	MUX(0, "mout_g2d", mout_g2d_p, SRC_DMC, 28, 1),
+	MUX(0, "mout_clkout_dmc", clkout_dmc_p4x12, CLKOUT_CMU_DMC, 0, 5),
 };
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
+	DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+	DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
+	DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
+			CLKOUT_CMU_LEFTBUS, 8, 6),
+
+	DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+	DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
+	DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
+			CLKOUT_CMU_RIGHTBUS, 8, 6),
+
 	DIV(0, "div_core", "mout_core", DIV_CPU0, 0, 3),
+	DIV(0, "div_corem0", "div_core2", DIV_CPU0, 4, 3),
+	DIV(0, "div_corem1", "div_core2", DIV_CPU0, 8, 3),
+	DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
+	DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
+	DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
 	DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+	DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
+	DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
+	DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
+
 	DIV(0, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
 	DIV(0, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
 	DIV(0, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
@@ -631,6 +781,16 @@
 			CLK_SET_RATE_PARENT, 0),
 	DIV_F(0, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
 			CLK_SET_RATE_PARENT, 0),
+	DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
+
+	DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+	DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
+	DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
+	DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+	DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
+	DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
+	DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
+	DIV(0, "div_clkout_dmc", "mout_clkout_dmc", CLKOUT_CMU_DMC, 8, 6),
 };
 
 /* list of divider clocks supported in exynos4210 soc */
@@ -671,6 +831,8 @@
 	DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
 						8, 3, CLK_GET_RATE_NOCACHE, 0),
 	DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+	DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+	DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
 /* list of gate clocks supported in all exynos4 soc's */
@@ -680,6 +842,8 @@
 	 * the device name and clock alias names specified below for some
 	 * of the clocks can be removed.
 	 */
+	GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0),
+	GATE(CLK_PPMURIGHT, "ppmuright", "aclk200", GATE_IP_RIGHTBUS, 1, 0, 0),
 	GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
 	GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0,
 		0),
@@ -695,11 +859,13 @@
 	GATE(CLK_SROMC, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
 	GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
 			CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_PPMUG3D, "ppmug3d", "aclk200", GATE_IP_G3D, 1, 0, 0),
 	GATE(CLK_USB_DEVICE, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
 	GATE(CLK_ONENAND, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
 	GATE(CLK_NFCON, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
 	GATE(CLK_GPS, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
 	GATE(CLK_SMMU_GPS, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
+	GATE(CLK_PPMUGPS, "ppmugps", "aclk200", GATE_IP_GPS, 2, 0, 0),
 	GATE(CLK_SLIMBUS, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
 	GATE(CLK_SCLK_CAM0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
 			CLK_SET_RATE_PARENT, 0),
@@ -781,19 +947,24 @@
 			0, 0),
 	GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
 			0, 0),
+	GATE(CLK_PPMUCAMIF, "ppmucamif", "aclk160", GATE_IP_CAM, 16, 0, 0),
 	GATE(CLK_PIXELASYNCM0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
 	GATE(CLK_PIXELASYNCM1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
 	GATE(CLK_SMMU_TV, "smmu_tv", "aclk160", GATE_IP_TV, 4,
 			0, 0),
+	GATE(CLK_PPMUTV, "ppmutv", "aclk160", GATE_IP_TV, 5, 0, 0),
 	GATE(CLK_MFC, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
 	GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
 			0, 0),
 	GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
 			0, 0),
+	GATE(CLK_PPMUMFC_L, "ppmumfc_l", "aclk100", GATE_IP_MFC, 3, 0, 0),
+	GATE(CLK_PPMUMFC_R, "ppmumfc_r", "aclk100", GATE_IP_MFC, 4, 0, 0),
 	GATE(CLK_FIMD0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
 			0, 0),
 	GATE(CLK_SMMU_FIMD0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
 			0, 0),
+	GATE(CLK_PPMULCD0, "ppmulcd0", "aclk160", GATE_IP_LCD0, 5, 0, 0),
 	GATE(CLK_PDMA0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
 			0, 0),
 	GATE(CLK_PDMA1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
@@ -806,6 +977,7 @@
 			0, 0),
 	GATE(CLK_SDMMC3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
 			0, 0),
+	GATE(CLK_PPMUFILE, "ppmufile", "aclk133", GATE_IP_FSYS, 17, 0, 0),
 	GATE(CLK_UART0, "uart0", "aclk100", GATE_IP_PERIL, 0,
 			0, 0),
 	GATE(CLK_UART1, "uart1", "aclk100", GATE_IP_PERIL, 1,
@@ -852,6 +1024,21 @@
 			0, 0),
 	GATE(CLK_AC97, "ac97", "aclk100", GATE_IP_PERIL, 27,
 			0, 0),
+	GATE(CLK_PPMUDMC0, "ppmudmc0", "aclk133", GATE_IP_DMC, 8, 0, 0),
+	GATE(CLK_PPMUDMC1, "ppmudmc1", "aclk133", GATE_IP_DMC, 9, 0, 0),
+	GATE(CLK_PPMUCPU, "ppmucpu", "aclk133", GATE_IP_DMC, 10, 0, 0),
+	GATE(CLK_PPMUACP, "ppmuacp", "aclk133", GATE_IP_DMC, 16, 0, 0),
+
+	GATE(CLK_OUT_LEFTBUS, "clkout_leftbus", "div_clkout_leftbus",
+			CLKOUT_CMU_LEFTBUS, 16, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_OUT_RIGHTBUS, "clkout_rightbus", "div_clkout_rightbus",
+			CLKOUT_CMU_RIGHTBUS, 16, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_OUT_TOP, "clkout_top", "div_clkout_top",
+			CLKOUT_CMU_TOP, 16, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_OUT_DMC, "clkout_dmc", "div_clkout_dmc",
+			CLKOUT_CMU_DMC, 16, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_OUT_CPU, "clkout_cpu", "div_clkout_cpu",
+			CLKOUT_CMU_CPU, 16, CLK_SET_RATE_PARENT, 0),
 };
 
 /* list of gate clocks supported in exynos4210 soc */
@@ -863,6 +1050,9 @@
 	GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
 	GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0,
 		0),
+	GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4210_GATE_IP_IMAGE, 9, 0,
+		0),
+	GATE(CLK_PPMULCD1, "ppmulcd1", "aclk160", E4210_GATE_IP_LCD1, 5, 0, 0),
 	GATE(CLK_PCIE_PHY, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
 	GATE(CLK_SATA_PHY, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
 	GATE(CLK_SATA, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
@@ -906,6 +1096,8 @@
 	GATE(CLK_MDMA, "mdma", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
 	GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0,
 		0),
+	GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
+		0),
 	GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
 	GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
 	GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
@@ -1062,7 +1254,7 @@
 
 }
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
 	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
 	{ .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
 	{},
@@ -1164,6 +1356,32 @@
 			VPLL_LOCK, VPLL_CON0, NULL),
 };
 
+static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+{
+	unsigned int tmp;
+
+	/*
+	 * Enable arm clock down (in idle) and set arm divider
+	 * ratios in WFI/WFE state.
+	 */
+	tmp = (PWR_CTRL1_CORE2_DOWN_RATIO(7) | PWR_CTRL1_CORE1_DOWN_RATIO(7) |
+		PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
+		PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
+		PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
+	/* On Exynos4412 enable it also on core 2 and 3 */
+	if (num_possible_cpus() == 4)
+		tmp |= PWR_CTRL1_USE_CORE3_WFE | PWR_CTRL1_USE_CORE2_WFE |
+		       PWR_CTRL1_USE_CORE3_WFI | PWR_CTRL1_USE_CORE2_WFI;
+	__raw_writel(tmp, reg_base + PWR_CTRL1);
+
+	/*
+	 * Disable the clock up feature on Exynos4x12, in case it was
+	 * enabled by the bootloader.
+	 */
+	if (soc == EXYNOS4X12)
+		__raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+}
+
 /* register exynos4 clocks */
 static void __init exynos4_clk_init(struct device_node *np,
 				    enum exynos4_soc soc)
@@ -1224,6 +1442,8 @@
 			ARRAY_SIZE(exynos4_div_clks));
 	samsung_clk_register_gate(ctx, exynos4_gate_clks,
 			ARRAY_SIZE(exynos4_gate_clks));
+	samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
+			ARRAY_SIZE(exynos4_fixed_factor_clks));
 
 	if (exynos4_soc == EXYNOS4210) {
 		samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
@@ -1236,6 +1456,9 @@
 			ARRAY_SIZE(exynos4210_gate_clks));
 		samsung_clk_register_alias(ctx, exynos4210_aliases,
 			ARRAY_SIZE(exynos4210_aliases));
+		samsung_clk_register_fixed_factor(ctx,
+			exynos4210_fixed_factor_clks,
+			ARRAY_SIZE(exynos4210_fixed_factor_clks));
 	} else {
 		samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
 			ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1245,13 +1468,19 @@
 			ARRAY_SIZE(exynos4x12_gate_clks));
 		samsung_clk_register_alias(ctx, exynos4x12_aliases,
 			ARRAY_SIZE(exynos4x12_aliases));
+		samsung_clk_register_fixed_factor(ctx,
+			exynos4x12_fixed_factor_clks,
+			ARRAY_SIZE(exynos4x12_fixed_factor_clks));
 	}
 
 	samsung_clk_register_alias(ctx, exynos4_aliases,
 			ARRAY_SIZE(exynos4_aliases));
 
+	exynos4_core_down_clock(soc);
 	exynos4_clk_sleep_init();
 
+	samsung_clk_of_add_provider(np, ctx);
+
 	pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
 		"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
 		exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 184f642..70ec3d2 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -748,7 +748,7 @@
 		VPLL_LOCK, VPLL_CON0, NULL),
 };
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
 	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
 	{ },
 };
@@ -820,6 +820,8 @@
 
 	exynos5250_clk_sleep_init();
 
+	samsung_clk_of_add_provider(np, ctx);
+
 	pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
 			_get_rate("div_arm2"));
 }
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 64596ba..ce3de97 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -206,6 +206,8 @@
 	if (cmu->clk_regs)
 		exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
 			cmu->nr_clk_regs);
+
+	samsung_clk_of_add_provider(np, ctx);
 }
 
 
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index c9505ab..231475b 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -204,6 +204,8 @@
 	samsung_clk_register_gate(ctx, exynos5410_gate_clks,
 			ARRAY_SIZE(exynos5410_gate_clks));
 
+	samsung_clk_of_add_provider(np, ctx);
+
 	pr_debug("Exynos5410: clock setup completed.\n");
 }
 CLK_OF_DECLARE(exynos5410_clk, "samsung,exynos5410-clock", exynos5410_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index a4e6cc7..848d602 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -28,6 +28,7 @@
 #define GATE_BUS_CPU		0x700
 #define GATE_SCLK_CPU		0x800
 #define CLKOUT_CMU_CPU		0xa00
+#define SRC_MASK_CPERI		0x4300
 #define GATE_IP_G2D		0x8800
 #define CPLL_LOCK		0x10020
 #define DPLL_LOCK		0x10030
@@ -70,6 +71,8 @@
 #define SRC_TOP11		0x10284
 #define SRC_TOP12		0x10288
 #define SRC_TOP13		0x1028c /* 5800 specific */
+#define SRC_MASK_TOP0		0x10300
+#define SRC_MASK_TOP1		0x10304
 #define SRC_MASK_TOP2		0x10308
 #define SRC_MASK_TOP7		0x1031c
 #define SRC_MASK_DISP10		0x1032c
@@ -77,6 +80,7 @@
 #define SRC_MASK_FSYS		0x10340
 #define SRC_MASK_PERIC0		0x10350
 #define SRC_MASK_PERIC1		0x10354
+#define SRC_MASK_ISP		0x10370
 #define DIV_TOP0		0x10500
 #define DIV_TOP1		0x10504
 #define DIV_TOP2		0x10508
@@ -98,6 +102,7 @@
 #define DIV2_RATIO0		0x10590
 #define DIV4_RATIO		0x105a0
 #define GATE_BUS_TOP		0x10700
+#define GATE_BUS_DISP1		0x10728
 #define GATE_BUS_GEN		0x1073c
 #define GATE_BUS_FSYS0		0x10740
 #define GATE_BUS_FSYS2		0x10748
@@ -190,6 +195,10 @@
 	SRC_MASK_FSYS,
 	SRC_MASK_PERIC0,
 	SRC_MASK_PERIC1,
+	SRC_MASK_TOP0,
+	SRC_MASK_TOP1,
+	SRC_MASK_MAU,
+	SRC_MASK_ISP,
 	SRC_ISP,
 	DIV_TOP0,
 	DIV_TOP1,
@@ -208,6 +217,7 @@
 	SCLK_DIV_ISP1,
 	DIV2_RATIO0,
 	DIV4_RATIO,
+	GATE_BUS_DISP1,
 	GATE_BUS_TOP,
 	GATE_BUS_GEN,
 	GATE_BUS_FSYS0,
@@ -249,6 +259,22 @@
 	GATE_IP_CAM,
 };
 
+static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
+	{ .offset = SRC_MASK_CPERI,		.value = 0xffffffff, },
+	{ .offset = SRC_MASK_TOP0,		.value = 0x11111111, },
+	{ .offset = SRC_MASK_TOP1,		.value = 0x11101111, },
+	{ .offset = SRC_MASK_TOP2,		.value = 0x11111110, },
+	{ .offset = SRC_MASK_TOP7,		.value = 0x00111100, },
+	{ .offset = SRC_MASK_DISP10,		.value = 0x11111110, },
+	{ .offset = SRC_MASK_MAU,		.value = 0x10000000, },
+	{ .offset = SRC_MASK_FSYS,		.value = 0x11111110, },
+	{ .offset = SRC_MASK_PERIC0,		.value = 0x11111110, },
+	{ .offset = SRC_MASK_PERIC1,		.value = 0x11111100, },
+	{ .offset = SRC_MASK_ISP,		.value = 0x11111000, },
+	{ .offset = GATE_BUS_DISP1,		.value = 0xffffffff, },
+	{ .offset = GATE_IP_PERIC,		.value = 0xffffffff, },
+};
+
 static int exynos5420_clk_suspend(void)
 {
 	samsung_clk_save(reg_base, exynos5x_save,
@@ -258,6 +284,9 @@
 		samsung_clk_save(reg_base, exynos5800_save,
 				ARRAY_SIZE(exynos5800_clk_regs));
 
+	samsung_clk_restore(reg_base, exynos5420_set_clksrc,
+				ARRAY_SIZE(exynos5420_set_clksrc));
+
 	return 0;
 }
 
@@ -1169,6 +1198,28 @@
 	GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
 };
 
+static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] = {
+	PLL_35XX_RATE(2000000000, 250, 3, 0),
+	PLL_35XX_RATE(1900000000, 475, 6, 0),
+	PLL_35XX_RATE(1800000000, 225, 3, 0),
+	PLL_35XX_RATE(1700000000, 425, 6, 0),
+	PLL_35XX_RATE(1600000000, 200, 3, 0),
+	PLL_35XX_RATE(1500000000, 250, 4, 0),
+	PLL_35XX_RATE(1400000000, 175, 3, 0),
+	PLL_35XX_RATE(1300000000, 325, 6, 0),
+	PLL_35XX_RATE(1200000000, 200, 2, 1),
+	PLL_35XX_RATE(1100000000, 275, 3, 1),
+	PLL_35XX_RATE(1000000000, 250, 3, 1),
+	PLL_35XX_RATE(900000000,  150, 2, 1),
+	PLL_35XX_RATE(800000000,  200, 3, 1),
+	PLL_35XX_RATE(700000000,  175, 3, 1),
+	PLL_35XX_RATE(600000000,  200, 2, 2),
+	PLL_35XX_RATE(500000000,  250, 3, 2),
+	PLL_35XX_RATE(400000000,  200, 3, 2),
+	PLL_35XX_RATE(300000000,  200, 2, 3),
+	PLL_35XX_RATE(200000000,  200, 3, 3),
+};
+
 static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
 	[apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
 		APLL_CON0, NULL),
@@ -1194,7 +1245,7 @@
 		KPLL_CON0, NULL),
 };
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
 	{ .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
 	{ },
 };
@@ -1222,6 +1273,12 @@
 	samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks,
 			ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
 			ext_clk_match);
+
+	if (_get_rate("fin_pll") == 24 * MHZ) {
+		exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+		exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+	}
+
 	samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls),
 					reg_base);
 	samsung_clk_register_fixed_rate(ctx, exynos5x_fixed_rate_clks,
@@ -1253,6 +1310,8 @@
 	}
 
 	exynos5420_clk_sleep_init();
+
+	samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init exynos5420_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 647f144..00d1d00 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -84,7 +84,7 @@
 	GATE(CLK_CS250_O, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
 };
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
 	{ .compatible = "samsung,clock-xtal", .data = (void *)0, },
 	{},
 };
@@ -123,6 +123,8 @@
 	samsung_clk_register_gate(ctx, exynos5440_gate_clks,
 			ARRAY_SIZE(exynos5440_gate_clks));
 
+	samsung_clk_of_add_provider(np, ctx);
+
 	pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
 	pr_info("exynos5440 clock initialization complete\n");
 }
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 140f473..5d2f034 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -466,6 +466,8 @@
 	}
 
 	s3c2410_clk_sleep_init();
+
+	samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init s3c2410_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index 23e4313..34af09f 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -265,6 +265,8 @@
 				   ARRAY_SIZE(s3c2412_aliases));
 
 	s3c2412_clk_sleep_init();
+
+	samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init s3c2412_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index c4bbdab..c92f853 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -445,6 +445,8 @@
 	}
 
 	s3c2443_clk_sleep_init();
+
+	samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init s3c2416_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 8889ff1c..0f590e5 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -518,6 +518,8 @@
 					ARRAY_SIZE(s3c64xx_clock_aliases));
 	s3c64xx_clk_sleep_init();
 
+	samsung_clk_of_add_provider(np, ctx);
+
 	pr_info("%s clocks: apll = %lu, mpll = %lu\n"
 		"\tepll = %lu, arm_clk = %lu\n",
 		is_s3c6400 ? "S3C6400" : "S3C6410",
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 49629c7..deab84d 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -53,7 +53,6 @@
 {
 	struct samsung_clk_provider *ctx;
 	struct clk **clk_table;
-	int ret;
 	int i;
 
 	ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL);
@@ -72,17 +71,19 @@
 	ctx->clk_data.clk_num = nr_clks;
 	spin_lock_init(&ctx->lock);
 
-	if (!np)
-		return ctx;
-
-	ret = of_clk_add_provider(np, of_clk_src_onecell_get,
-			&ctx->clk_data);
-	if (ret)
-		panic("could not register clock provide\n");
-
 	return ctx;
 }
 
+void __init samsung_clk_of_add_provider(struct device_node *np,
+				struct samsung_clk_provider *ctx)
+{
+	if (np) {
+		if (of_clk_add_provider(np, of_clk_src_onecell_get,
+					&ctx->clk_data))
+			panic("could not register clk provider\n");
+	}
+}
+
 /* add a clock instance to the clock lookup table used for dt based lookup */
 void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk,
 				unsigned int id)
@@ -284,7 +285,7 @@
 void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
 			struct samsung_fixed_rate_clock *fixed_rate_clk,
 			unsigned int nr_fixed_rate_clk,
-			struct of_device_id *clk_matches)
+			const struct of_device_id *clk_matches)
 {
 	const struct of_device_id *match;
 	struct device_node *clk_np;
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 9693b80..66ab36b 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -327,11 +327,13 @@
 extern struct samsung_clk_provider *__init samsung_clk_init(
 			struct device_node *np, void __iomem *base,
 			unsigned long nr_clks);
+extern void __init samsung_clk_of_add_provider(struct device_node *np,
+			struct samsung_clk_provider *ctx);
 extern void __init samsung_clk_of_register_fixed_ext(
 			struct samsung_clk_provider *ctx,
 			struct samsung_fixed_rate_clock *fixed_rate_clk,
 			unsigned int nr_fixed_rate_clk,
-			struct of_device_id *clk_matches);
+			const struct of_device_id *clk_matches);
 
 extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
 			struct clk *clk, unsigned int id);
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 65894f7..4daa597 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -742,19 +742,19 @@
 	clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
 			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
 			0, &_lock);
-	clk_register_clkdev(clk, NULL, "dw_pcie.0");
+	clk_register_clkdev(clk, NULL, "b1000000.pcie");
 	clk_register_clkdev(clk, NULL, "b1000000.ahci");
 
 	clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
 			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
 			0, &_lock);
-	clk_register_clkdev(clk, NULL, "dw_pcie.1");
+	clk_register_clkdev(clk, NULL, "b1800000.pcie");
 	clk_register_clkdev(clk, NULL, "b1800000.ahci");
 
 	clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
 			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
 			0, &_lock);
-	clk_register_clkdev(clk, NULL, "dw_pcie.2");
+	clk_register_clkdev(clk, NULL, "b4000000.pcie");
 	clk_register_clkdev(clk, NULL, "b4000000.ahci");
 
 	clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index fe835c1..5a5c664 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -839,7 +839,7 @@
 	clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
 			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
 			0, &_lock);
-	clk_register_clkdev(clk, NULL, "dw_pcie");
+	clk_register_clkdev(clk, NULL, "b1000000.pcie");
 	clk_register_clkdev(clk, NULL, "b1000000.ahci");
 
 	clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
diff --git a/drivers/clk/st/Makefile b/drivers/clk/st/Makefile
index c7455ff..ede7b2f 100644
--- a/drivers/clk/st/Makefile
+++ b/drivers/clk/st/Makefile
@@ -1 +1 @@
-obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o
+obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o clk-flexgen.o
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
new file mode 100644
index 0000000..2282cef
--- /dev/null
+++ b/drivers/clk/st/clk-flexgen.c
@@ -0,0 +1,331 @@
+/*
+ * clk-flexgen.c
+ *
+ * Copyright (C) ST-Microelectronics SA 2013
+ * Author:  Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct flexgen {
+	struct clk_hw hw;
+
+	/* Crossbar */
+	struct clk_mux mux;
+	/* Pre-divisor's gate */
+	struct clk_gate pgate;
+	/* Pre-divisor */
+	struct clk_divider pdiv;
+	/* Final divisor's gate */
+	struct clk_gate fgate;
+	/* Final divisor */
+	struct clk_divider fdiv;
+};
+
+#define to_flexgen(_hw) container_of(_hw, struct flexgen, hw)
+
+static int flexgen_enable(struct clk_hw *hw)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *pgate_hw = &flexgen->pgate.hw;
+	struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+	pgate_hw->clk = hw->clk;
+	fgate_hw->clk = hw->clk;
+
+	clk_gate_ops.enable(pgate_hw);
+
+	clk_gate_ops.enable(fgate_hw);
+
+	pr_debug("%s: flexgen output enabled\n", __clk_get_name(hw->clk));
+	return 0;
+}
+
+static void flexgen_disable(struct clk_hw *hw)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+	/* disable only the final gate */
+	fgate_hw->clk = hw->clk;
+
+	clk_gate_ops.disable(fgate_hw);
+
+	pr_debug("%s: flexgen output disabled\n", __clk_get_name(hw->clk));
+}
+
+static int flexgen_is_enabled(struct clk_hw *hw)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+	fgate_hw->clk = hw->clk;
+
+	if (!clk_gate_ops.is_enabled(fgate_hw))
+		return 0;
+
+	return 1;
+}
+
+static u8 flexgen_get_parent(struct clk_hw *hw)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *mux_hw = &flexgen->mux.hw;
+
+	mux_hw->clk = hw->clk;
+
+	return clk_mux_ops.get_parent(mux_hw);
+}
+
+static int flexgen_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *mux_hw = &flexgen->mux.hw;
+
+	mux_hw->clk = hw->clk;
+
+	return clk_mux_ops.set_parent(mux_hw, index);
+}
+
+static inline unsigned long
+clk_best_div(unsigned long parent_rate, unsigned long rate)
+{
+	return parent_rate / rate + ((rate > (2*(parent_rate % rate))) ? 0 : 1);
+}
+
+static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long *prate)
+{
+	unsigned long div;
+
+	/* Round div according to exact prate and wished rate */
+	div = clk_best_div(*prate, rate);
+
+	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+		*prate = rate * div;
+		return rate;
+	}
+
+	return *prate / div;
+}
+
+static unsigned long flexgen_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
+	struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+	unsigned long mid_rate;
+
+	pdiv_hw->clk = hw->clk;
+	fdiv_hw->clk = hw->clk;
+
+	mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
+
+	return clk_divider_ops.recalc_rate(fdiv_hw, mid_rate);
+}
+
+static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct flexgen *flexgen = to_flexgen(hw);
+	struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
+	struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+	unsigned long primary_div = 0;
+	int ret = 0;
+
+	pdiv_hw->clk = hw->clk;
+	fdiv_hw->clk = hw->clk;
+
+	primary_div = clk_best_div(parent_rate, rate);
+
+	clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+	ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div);
+
+	return ret;
+}
+
+static const struct clk_ops flexgen_ops = {
+	.enable = flexgen_enable,
+	.disable = flexgen_disable,
+	.is_enabled = flexgen_is_enabled,
+	.get_parent = flexgen_get_parent,
+	.set_parent = flexgen_set_parent,
+	.round_rate = flexgen_round_rate,
+	.recalc_rate = flexgen_recalc_rate,
+	.set_rate = flexgen_set_rate,
+};
+
+struct clk *clk_register_flexgen(const char *name,
+				const char **parent_names, u8 num_parents,
+				void __iomem *reg, spinlock_t *lock, u32 idx,
+				unsigned long flexgen_flags)
+{
+	struct flexgen *fgxbar;
+	struct clk *clk;
+	struct clk_init_data init;
+	u32  xbar_shift;
+	void __iomem *xbar_reg, *fdiv_reg;
+
+	fgxbar = kzalloc(sizeof(struct flexgen), GFP_KERNEL);
+	if (!fgxbar)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &flexgen_ops;
+	init.flags = CLK_IS_BASIC | flexgen_flags;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+
+	xbar_reg = reg + 0x18 + (idx & ~0x3);
+	xbar_shift = (idx % 4) * 0x8;
+	fdiv_reg = reg + 0x164 + idx * 4;
+
+	/* Crossbar element config */
+	fgxbar->mux.lock = lock;
+	fgxbar->mux.mask = BIT(6) - 1;
+	fgxbar->mux.reg = xbar_reg;
+	fgxbar->mux.shift = xbar_shift;
+	fgxbar->mux.table = NULL;
+
+	/* Pre-divider's gate config (in xbar register) */
+	fgxbar->pgate.lock = lock;
+	fgxbar->pgate.reg = xbar_reg;
+	fgxbar->pgate.bit_idx = xbar_shift + 6;
+
+	/* Pre-divider config */
+	fgxbar->pdiv.lock = lock;
+	fgxbar->pdiv.reg = reg + 0x58 + idx * 4;
+	fgxbar->pdiv.width = 10;
+
+	/* Final divider's gate config */
+	fgxbar->fgate.lock = lock;
+	fgxbar->fgate.reg = fdiv_reg;
+	fgxbar->fgate.bit_idx = 6;
+
+	/* Final divider config */
+	fgxbar->fdiv.lock = lock;
+	fgxbar->fdiv.reg = fdiv_reg;
+	fgxbar->fdiv.width = 6;
+
+	fgxbar->hw.init = &init;
+
+	clk = clk_register(NULL, &fgxbar->hw);
+	if (IS_ERR(clk))
+		kfree(fgxbar);
+	else
+		pr_debug("%s: parent %s rate %u\n",
+			__clk_get_name(clk),
+			__clk_get_name(clk_get_parent(clk)),
+			(unsigned int)clk_get_rate(clk));
+	return clk;
+}
+
+static const char ** __init flexgen_get_parents(struct device_node *np,
+						       int *num_parents)
+{
+	const char **parents;
+	int nparents, i;
+
+	nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+	if (WARN_ON(nparents <= 0))
+		return NULL;
+
+	parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
+	if (!parents)
+		return NULL;
+
+	for (i = 0; i < nparents; i++)
+		parents[i] = of_clk_get_parent_name(np, i);
+
+	*num_parents = nparents;
+	return parents;
+}
+
+void __init st_of_flexgen_setup(struct device_node *np)
+{
+	struct device_node *pnode;
+	void __iomem *reg;
+	struct clk_onecell_data *clk_data;
+	const char **parents;
+	int num_parents, i;
+	spinlock_t *rlock = NULL;
+	unsigned long flex_flags = 0;
+
+	pnode = of_get_parent(np);
+	if (!pnode)
+		return;
+
+	reg = of_iomap(pnode, 0);
+	if (!reg)
+		return;
+
+	parents = flexgen_get_parents(np, &num_parents);
+	if (!parents)
+		return;
+
+	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		goto err;
+
+	clk_data->clk_num = of_property_count_strings(np,
+			"clock-output-names");
+	if (clk_data->clk_num <= 0) {
+		pr_err("%s: Failed to get number of output clocks (%d)\n",
+				__func__, clk_data->clk_num);
+		goto err;
+	}
+
+	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
+			GFP_KERNEL);
+	if (!clk_data->clks)
+		goto err;
+
+	rlock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (!rlock)
+		goto err;
+
+	for (i = 0; i < clk_data->clk_num; i++) {
+		struct clk *clk;
+		const char *clk_name;
+
+		if (of_property_read_string_index(np, "clock-output-names",
+						  i, &clk_name)) {
+			break;
+		}
+
+		/*
+		 * If we read an empty clock name then the output is unused
+		 */
+		if (*clk_name == '\0')
+			continue;
+
+		clk = clk_register_flexgen(clk_name, parents, num_parents,
+					   reg, rlock, i, flex_flags);
+
+		if (IS_ERR(clk))
+			goto err;
+
+		clk_data->clks[i] = clk;
+	}
+
+	kfree(parents);
+	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+
+	return;
+
+err:
+	if (clk_data)
+		kfree(clk_data->clks);
+	kfree(clk_data);
+	kfree(parents);
+	kfree(rlock);
+}
+CLK_OF_DECLARE(flexgen, "st,flexgen", st_of_flexgen_setup);
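
A note on the divisor rounding used above: clk_best_div() returns the divisor
nearest to the requested rate, rounding up on ties (it adds one to the floored
quotient whenever the remainder is at least half the requested rate). A minimal
userspace sketch of the same rule, with an illustrative parent rate and target
list, is:

#include <stdio.h>

/* Same rounding rule as clk_best_div(): nearest divisor, ties round up. */
static unsigned long best_div(unsigned long parent_rate, unsigned long rate)
{
	return parent_rate / rate +
	       ((rate > (2 * (parent_rate % rate))) ? 0 : 1);
}

int main(void)
{
	unsigned long parent = 297000000;	/* illustrative parent rate */
	unsigned long targets[] = { 148500000, 100000000, 90000000, 65000000 };

	for (unsigned int i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		unsigned long div = best_div(parent, targets[i]);

		printf("target %lu Hz -> div %lu -> actual %lu Hz\n",
		       targets[i], div, parent / div);
	}
	return 0;
}
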
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 4f53ee0..af94ed8 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -41,7 +41,7 @@
 	unsigned long nsdiv;
 };
 
-static struct stm_fs fs216c65_rtbl[] = {
+static const struct stm_fs fs216c65_rtbl[] = {
 	{ .mdiv = 0x1f, .pe = 0x0,	.sdiv = 0x7,	.nsdiv = 0 },	/* 312.5 Khz */
 	{ .mdiv = 0x17, .pe = 0x25ed,	.sdiv = 0x1,	.nsdiv = 0 },	/* 27    MHz */
 	{ .mdiv = 0x1a, .pe = 0x7b36,	.sdiv = 0x2,	.nsdiv = 1 },	/* 36.87 MHz */
@@ -49,31 +49,86 @@
 	{ .mdiv = 0x11, .pe = 0x1c72,	.sdiv = 0x1,	.nsdiv = 1 },	/* 108   MHz */
 };
 
-static struct stm_fs fs432c65_rtbl[] = {
-	{ .mdiv = 0x1f, .pe = 0x0,	.sdiv = 0x7,	.nsdiv = 0 },	/* 625   Khz */
-	{ .mdiv = 0x11, .pe = 0x1c72,	.sdiv = 0x2,	.nsdiv = 1 },	/* 108   MHz */
-	{ .mdiv = 0x19, .pe = 0x121a,	.sdiv = 0x0,	.nsdiv = 1 },	/* 297   MHz */
+static const struct stm_fs fs432c65_rtbl[] = {
+	{ .mdiv = 0x1f, .pe = 0x0,	.sdiv = 0x7,	.nsdiv = 0 },	/* 625     kHz */
+	{ .mdiv = 0x13, .pe = 0x777c,	.sdiv = 0x4,	.nsdiv = 1 },	/* 25.175  MHz */
+	{ .mdiv = 0x19, .pe = 0x4d35,	.sdiv = 0x2,	.nsdiv = 0 },	/* 25.200  MHz */
+	{ .mdiv = 0x11, .pe = 0x1c72,	.sdiv = 0x4,	.nsdiv = 1 },	/* 27.000  MHz */
+	{ .mdiv = 0x17, .pe = 0x28f5,	.sdiv = 0x2,	.nsdiv = 0 },	/* 27.027  MHz */
+	{ .mdiv = 0x16, .pe = 0x3359,	.sdiv = 0x2,	.nsdiv = 0 },	/* 28.320  MHz */
+	{ .mdiv = 0x1f, .pe = 0x2083,	.sdiv = 0x3,	.nsdiv = 1 },	/* 30.240  MHz */
+	{ .mdiv = 0x1e, .pe = 0x430d,	.sdiv = 0x3,	.nsdiv = 1 },	/* 31.500  MHz */
+	{ .mdiv = 0x17, .pe = 0x0,	.sdiv = 0x3,	.nsdiv = 1 },	/* 40.000  MHz */
+	{ .mdiv = 0x19, .pe = 0x121a,	.sdiv = 0x1,	.nsdiv = 0 },	/* 49.500  MHz */
+	{ .mdiv = 0x13, .pe = 0x6667,	.sdiv = 0x3,	.nsdiv = 1 },	/* 50.000  MHz */
+	{ .mdiv = 0x10, .pe = 0x1ee6,	.sdiv = 0x3,	.nsdiv = 1 },	/* 57.284  MHz */
+	{ .mdiv = 0x1d, .pe = 0x3b14,	.sdiv = 0x2,	.nsdiv = 1 },	/* 65.000  MHz */
+	{ .mdiv = 0x12, .pe = 0x7c65,	.sdiv = 0x1,	.nsdiv = 0 },	/* 71.000  MHz */
+	{ .mdiv = 0x19, .pe = 0xecd,	.sdiv = 0x2,	.nsdiv = 1 },	/* 74.176  MHz */
+	{ .mdiv = 0x19, .pe = 0x121a,	.sdiv = 0x2,	.nsdiv = 1 },	/* 74.250  MHz */
+	{ .mdiv = 0x19, .pe = 0x3334,	.sdiv = 0x2,	.nsdiv = 1 },	/* 75.000  MHz */
+	{ .mdiv = 0x18, .pe = 0x5138,	.sdiv = 0x2,	.nsdiv = 1 },	/* 78.800  MHz */
+	{ .mdiv = 0x1d, .pe = 0x77d,	.sdiv = 0x0,	.nsdiv = 0 },	/* 85.500  MHz */
+	{ .mdiv = 0x1c, .pe = 0x13d5,	.sdiv = 0x0,	.nsdiv = 0 },	/* 88.750  MHz */
+	{ .mdiv = 0x11, .pe = 0x1c72,	.sdiv = 0x2,	.nsdiv = 1 },	/* 108.000 MHz */
+	{ .mdiv = 0x17, .pe = 0x28f5,	.sdiv = 0x0,	.nsdiv = 0 },	/* 108.108 MHz */
+	{ .mdiv = 0x10, .pe = 0x6e26,	.sdiv = 0x2,	.nsdiv = 1 },	/* 118.963 MHz */
+	{ .mdiv = 0x15, .pe = 0x3e63,	.sdiv = 0x0,	.nsdiv = 0 },	/* 119.000 MHz */
+	{ .mdiv = 0x1c, .pe = 0x471d,	.sdiv = 0x1,	.nsdiv = 1 },	/* 135.000 MHz */
+	{ .mdiv = 0x19, .pe = 0xecd,	.sdiv = 0x1,	.nsdiv = 1 },	/* 148.352 MHz */
+	{ .mdiv = 0x19, .pe = 0x121a,	.sdiv = 0x1,	.nsdiv = 1 },	/* 148.500 MHz */
+	{ .mdiv = 0x19, .pe = 0x121a,	.sdiv = 0x0,	.nsdiv = 1 },	/* 297     MHz */
 };
 
-static struct stm_fs fs660c32_rtbl[] = {
-	{ .mdiv = 0x01, .pe = 0x2aaa,	.sdiv = 0x8,	.nsdiv = 0 },	/* 600   KHz */
-	{ .mdiv = 0x02, .pe = 0x3d33,	.sdiv = 0x0,	.nsdiv = 0 },	/* 148.5 Mhz */
-	{ .mdiv = 0x13, .pe = 0x5bcc,	.sdiv = 0x0,	.nsdiv = 1 },	/* 297   Mhz */
-	{ .mdiv = 0x0e, .pe = 0x1025,	.sdiv = 0x0,	.nsdiv = 1 },	/* 333   Mhz */
-	{ .mdiv = 0x0b, .pe = 0x715f,	.sdiv = 0x0,	.nsdiv = 1 },	/* 350   Mhz */
+static const struct stm_fs fs660c32_rtbl[] = {
+	{ .mdiv = 0x14, .pe = 0x376b,	.sdiv = 0x4,	.nsdiv = 1 },	/* 25.175  MHz */
+	{ .mdiv = 0x14, .pe = 0x30c3,	.sdiv = 0x4,	.nsdiv = 1 },	/* 25.200  MHz */
+	{ .mdiv = 0x10, .pe = 0x71c7,	.sdiv = 0x4,	.nsdiv = 1 },	/* 27.000  MHz */
+	{ .mdiv = 0x00, .pe = 0x47af,	.sdiv = 0x3,	.nsdiv = 0 },	/* 27.027  MHz */
+	{ .mdiv = 0x0e, .pe = 0x4e1a,	.sdiv = 0x4,	.nsdiv = 1 },	/* 28.320  MHz */
+	{ .mdiv = 0x0b, .pe = 0x534d,	.sdiv = 0x4,	.nsdiv = 1 },	/* 30.240  MHz */
+	{ .mdiv = 0x17, .pe = 0x6fbf,	.sdiv = 0x2,	.nsdiv = 0 },	/* 31.500  MHz */
+	{ .mdiv = 0x01, .pe = 0x0,	.sdiv = 0x4,	.nsdiv = 1 },	/* 40.000  MHz */
+	{ .mdiv = 0x15, .pe = 0x2aab,	.sdiv = 0x3,	.nsdiv = 1 },	/* 49.500  MHz */
+	{ .mdiv = 0x14, .pe = 0x6666,	.sdiv = 0x3,	.nsdiv = 1 },	/* 50.000  MHz */
+	{ .mdiv = 0x1d, .pe = 0x395f,	.sdiv = 0x1,	.nsdiv = 0 },	/* 57.284  MHz */
+	{ .mdiv = 0x08, .pe = 0x4ec5,	.sdiv = 0x3,	.nsdiv = 1 },	/* 65.000  MHz */
+	{ .mdiv = 0x05, .pe = 0x1770,	.sdiv = 0x3,	.nsdiv = 1 },	/* 71.000  MHz */
+	{ .mdiv = 0x03, .pe = 0x4ba7,	.sdiv = 0x3,	.nsdiv = 1 },	/* 74.176  MHz */
+	{ .mdiv = 0x0f, .pe = 0x3426,	.sdiv = 0x1,	.nsdiv = 0 },	/* 74.250  MHz */
+	{ .mdiv = 0x0e, .pe = 0x7777,	.sdiv = 0x1,	.nsdiv = 0 },	/* 75.000  MHz */
+	{ .mdiv = 0x01, .pe = 0x4053,	.sdiv = 0x3,	.nsdiv = 1 },	/* 78.800  MHz */
+	{ .mdiv = 0x09, .pe = 0x15b5,	.sdiv = 0x1,	.nsdiv = 0 },	/* 85.500  MHz */
+	{ .mdiv = 0x1b, .pe = 0x3f19,	.sdiv = 0x2,	.nsdiv = 1 },	/* 88.750  MHz */
+	{ .mdiv = 0x10, .pe = 0x71c7,	.sdiv = 0x2,	.nsdiv = 1 },	/* 108.000 MHz */
+	{ .mdiv = 0x00, .pe = 0x47af,	.sdiv = 0x1,	.nsdiv = 0 },	/* 108.108 MHz */
+	{ .mdiv = 0x0c, .pe = 0x3118,	.sdiv = 0x2,	.nsdiv = 1 },	/* 118.963 MHz */
+	{ .mdiv = 0x0c, .pe = 0x2f54,	.sdiv = 0x2,	.nsdiv = 1 },	/* 119.000 MHz */
+	{ .mdiv = 0x07, .pe = 0xe39,	.sdiv = 0x2,	.nsdiv = 1 },	/* 135.000 MHz */
+	{ .mdiv = 0x03, .pe = 0x4ba7,	.sdiv = 0x2,	.nsdiv = 1 },	/* 148.352 MHz */
+	{ .mdiv = 0x0f, .pe = 0x3426,	.sdiv = 0x0,	.nsdiv = 0 },	/* 148.500 MHz */
+	{ .mdiv = 0x03, .pe = 0x4ba7,	.sdiv = 0x1,	.nsdiv = 1 },	/* 296.704 MHz */
+	{ .mdiv = 0x03, .pe = 0x471c,	.sdiv = 0x1,	.nsdiv = 1 },	/* 297.000 MHz */
+	{ .mdiv = 0x00, .pe = 0x295f,	.sdiv = 0x1,	.nsdiv = 1 },	/* 326.700 MHz */
+	{ .mdiv = 0x1f, .pe = 0x3633,	.sdiv = 0x0,	.nsdiv = 1 },	/* 333.000 MHz */
+	{ .mdiv = 0x1c, .pe = 0x0,	.sdiv = 0x0,	.nsdiv = 1 },	/* 352.000 MHz */
 };
 
 struct clkgen_quadfs_data {
 	bool reset_present;
 	bool bwfilter_present;
 	bool lockstatus_present;
+	bool powerup_polarity;
+	bool standby_polarity;
 	bool nsdiv_present;
+	bool nrst_present;
 	struct clkgen_field ndiv;
 	struct clkgen_field ref_bw;
 	struct clkgen_field nreset;
 	struct clkgen_field npda;
 	struct clkgen_field lock_status;
 
+	struct clkgen_field nrst[QUADFS_MAX_CHAN];
 	struct clkgen_field nsb[QUADFS_MAX_CHAN];
 	struct clkgen_field en[QUADFS_MAX_CHAN];
 	struct clkgen_field mdiv[QUADFS_MAX_CHAN];
@@ -82,9 +137,9 @@
 	struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
 
 	const struct clk_ops *pll_ops;
-	struct stm_fs *rtbl;
+	const struct stm_fs *rtbl;
 	u8 rtbl_cnt;
-	int  (*get_rate)(unsigned long , struct stm_fs *,
+	int  (*get_rate)(unsigned long , const struct stm_fs *,
 			unsigned long *);
 };
 
@@ -94,11 +149,11 @@
 static const struct clk_ops st_quadfs_fs432c65_ops;
 static const struct clk_ops st_quadfs_fs660c32_ops;
 
-static int clk_fs216c65_get_rate(unsigned long, struct stm_fs *,
+static int clk_fs216c65_get_rate(unsigned long, const struct stm_fs *,
 		unsigned long *);
-static int clk_fs432c65_get_rate(unsigned long, struct stm_fs *,
+static int clk_fs432c65_get_rate(unsigned long, const struct stm_fs *,
 		unsigned long *);
-static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *,
+static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
 		unsigned long *);
 /*
  * Values for all of the standalone instances of this clock
@@ -106,7 +161,7 @@
  * that the individual channel standby control bits (nsb) are in the
  * first register along with the PLL control bits.
  */
-static struct clkgen_quadfs_data st_fs216c65_416 = {
+static const struct clkgen_quadfs_data st_fs216c65_416 = {
 	/* 416 specific */
 	.npda	= CLKGEN_FIELD(0x0, 0x1, 14),
 	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10),
@@ -143,7 +198,7 @@
 	.get_rate	= clk_fs216c65_get_rate,
 };
 
-static struct clkgen_quadfs_data st_fs432c65_416 = {
+static const struct clkgen_quadfs_data st_fs432c65_416 = {
 	.npda	= CLKGEN_FIELD(0x0, 0x1, 14),
 	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10),
 		    CLKGEN_FIELD(0x0, 0x1, 11),
@@ -179,7 +234,7 @@
 	.get_rate	= clk_fs432c65_get_rate,
 };
 
-static struct clkgen_quadfs_data st_fs660c32_E_416 = {
+static const struct clkgen_quadfs_data st_fs660c32_E_416 = {
 	.npda	= CLKGEN_FIELD(0x0, 0x1, 14),
 	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10),
 		    CLKGEN_FIELD(0x0, 0x1, 11),
@@ -215,7 +270,7 @@
 	.get_rate	= clk_fs660c32_dig_get_rate,
 };
 
-static struct clkgen_quadfs_data st_fs660c32_F_416 = {
+static const struct clkgen_quadfs_data st_fs660c32_F_416 = {
 	.npda	= CLKGEN_FIELD(0x0, 0x1, 14),
 	.nsb	= { CLKGEN_FIELD(0x0, 0x1, 10),
 		    CLKGEN_FIELD(0x0, 0x1, 11),
@@ -251,6 +306,92 @@
 	.get_rate	= clk_fs660c32_dig_get_rate,
 };
 
+static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
+	.nrst_present = true,
+	.nrst	= { CLKGEN_FIELD(0x2f0, 0x1, 0),
+		    CLKGEN_FIELD(0x2f0, 0x1, 1),
+		    CLKGEN_FIELD(0x2f0, 0x1, 2),
+		    CLKGEN_FIELD(0x2f0, 0x1, 3) },
+	.npda	= CLKGEN_FIELD(0x2f0, 0x1, 12),
+	.nsb	= { CLKGEN_FIELD(0x2f0, 0x1, 8),
+		    CLKGEN_FIELD(0x2f0, 0x1, 9),
+		    CLKGEN_FIELD(0x2f0, 0x1, 10),
+		    CLKGEN_FIELD(0x2f0, 0x1, 11) },
+	.nsdiv_present = true,
+	.nsdiv	= { CLKGEN_FIELD(0x304, 0x1, 24),
+		    CLKGEN_FIELD(0x308, 0x1, 24),
+		    CLKGEN_FIELD(0x30c, 0x1, 24),
+		    CLKGEN_FIELD(0x310, 0x1, 24) },
+	.mdiv	= { CLKGEN_FIELD(0x304, 0x1f, 15),
+		    CLKGEN_FIELD(0x308, 0x1f, 15),
+		    CLKGEN_FIELD(0x30c, 0x1f, 15),
+		    CLKGEN_FIELD(0x310, 0x1f, 15) },
+	.en	= { CLKGEN_FIELD(0x2fc, 0x1, 0),
+		    CLKGEN_FIELD(0x2fc, 0x1, 1),
+		    CLKGEN_FIELD(0x2fc, 0x1, 2),
+		    CLKGEN_FIELD(0x2fc, 0x1, 3) },
+	.ndiv	= CLKGEN_FIELD(0x2f4, 0x7, 16),
+	.pe	= { CLKGEN_FIELD(0x304, 0x7fff, 0),
+		    CLKGEN_FIELD(0x308, 0x7fff, 0),
+		    CLKGEN_FIELD(0x30c, 0x7fff, 0),
+		    CLKGEN_FIELD(0x310, 0x7fff, 0) },
+	.sdiv	= { CLKGEN_FIELD(0x304, 0xf, 20),
+		    CLKGEN_FIELD(0x308, 0xf, 20),
+		    CLKGEN_FIELD(0x30c, 0xf, 20),
+		    CLKGEN_FIELD(0x310, 0xf, 20) },
+	.lockstatus_present = true,
+	.lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+	.powerup_polarity = 1,
+	.standby_polarity = 1,
+	.pll_ops	= &st_quadfs_pll_c32_ops,
+	.rtbl		= fs660c32_rtbl,
+	.rtbl_cnt	= ARRAY_SIZE(fs660c32_rtbl),
+	.get_rate	= clk_fs660c32_dig_get_rate,
+};
+
+static const struct clkgen_quadfs_data st_fs660c32_D_407 = {
+	.nrst_present = true,
+	.nrst	= { CLKGEN_FIELD(0x2a0, 0x1, 0),
+		    CLKGEN_FIELD(0x2a0, 0x1, 1),
+		    CLKGEN_FIELD(0x2a0, 0x1, 2),
+		    CLKGEN_FIELD(0x2a0, 0x1, 3) },
+	.ndiv	= CLKGEN_FIELD(0x2a4, 0x7, 16),
+	.pe	= { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
+		    CLKGEN_FIELD(0x2b8, 0x7fff, 0),
+		    CLKGEN_FIELD(0x2bc, 0x7fff, 0),
+		    CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
+	.sdiv	= { CLKGEN_FIELD(0x2b4, 0xf, 20),
+		    CLKGEN_FIELD(0x2b8, 0xf, 20),
+		    CLKGEN_FIELD(0x2bc, 0xf, 20),
+		    CLKGEN_FIELD(0x2c0, 0xf, 20) },
+	.npda	= CLKGEN_FIELD(0x2a0, 0x1, 12),
+	.nsb	= { CLKGEN_FIELD(0x2a0, 0x1, 8),
+		    CLKGEN_FIELD(0x2a0, 0x1, 9),
+		    CLKGEN_FIELD(0x2a0, 0x1, 10),
+		    CLKGEN_FIELD(0x2a0, 0x1, 11) },
+	.nsdiv_present = true,
+	.nsdiv	= { CLKGEN_FIELD(0x2b4, 0x1, 24),
+		    CLKGEN_FIELD(0x2b8, 0x1, 24),
+		    CLKGEN_FIELD(0x2bc, 0x1, 24),
+		    CLKGEN_FIELD(0x2c0, 0x1, 24) },
+	.mdiv	= { CLKGEN_FIELD(0x2b4, 0x1f, 15),
+		    CLKGEN_FIELD(0x2b8, 0x1f, 15),
+		    CLKGEN_FIELD(0x2bc, 0x1f, 15),
+		    CLKGEN_FIELD(0x2c0, 0x1f, 15) },
+	.en	= { CLKGEN_FIELD(0x2ac, 0x1, 0),
+		    CLKGEN_FIELD(0x2ac, 0x1, 1),
+		    CLKGEN_FIELD(0x2ac, 0x1, 2),
+		    CLKGEN_FIELD(0x2ac, 0x1, 3) },
+	.lockstatus_present = true,
+	.lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+	.powerup_polarity = 1,
+	.standby_polarity = 1,
+	.pll_ops	= &st_quadfs_pll_c32_ops,
+	.rtbl		= fs660c32_rtbl,
+	.rtbl_cnt	= ARRAY_SIZE(fs660c32_rtbl),
+	.get_rate	= clk_fs660c32_dig_get_rate,
+};
+
 /**
  * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
  *
@@ -308,7 +448,7 @@
 	/*
 	 * Power up the PLL
 	 */
-	CLKGEN_WRITE(pll, npda, 1);
+	CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);
 
 	if (pll->lock)
 		spin_unlock_irqrestore(pll->lock, flags);
@@ -335,7 +475,7 @@
 	 * Powerdown the PLL and then put block into soft reset if we have
 	 * reset control.
 	 */
-	CLKGEN_WRITE(pll, npda, 0);
+	CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);
 
 	if (pll->data->reset_present)
 		CLKGEN_WRITE(pll, nreset, 0);
@@ -611,7 +751,10 @@
 	if (fs->lock)
 		spin_lock_irqsave(fs->lock, flags);
 
-	CLKGEN_WRITE(fs, nsb[fs->chan], 1);
+	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+
+	if (fs->data->nrst_present)
+		CLKGEN_WRITE(fs, nrst[fs->chan], 0);
 
 	if (fs->lock)
 		spin_unlock_irqrestore(fs->lock, flags);
@@ -631,7 +774,7 @@
 	if (fs->lock)
 		spin_lock_irqsave(fs->lock, flags);
 
-	CLKGEN_WRITE(fs, nsb[fs->chan], 0);
+	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
 
 	if (fs->lock)
 		spin_unlock_irqrestore(fs->lock, flags);
@@ -645,12 +788,12 @@
 	pr_debug("%s: %s enable bit = 0x%x\n",
 		 __func__, __clk_get_name(hw->clk), nsb);
 
-	return !!nsb;
+	return fs->data->standby_polarity ? !nsb : !!nsb;
 }
 
 #define P15			(uint64_t)(1 << 15)
 
-static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs,
+static int clk_fs216c65_get_rate(unsigned long input, const struct stm_fs *fs,
 		unsigned long *rate)
 {
 	uint64_t res;
@@ -670,7 +813,7 @@
 	return 0;
 }
 
-static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs,
+static int clk_fs432c65_get_rate(unsigned long input, const struct stm_fs *fs,
 		unsigned long *rate)
 {
 	uint64_t res;
@@ -693,7 +836,7 @@
 #define P20		(uint64_t)(1 << 20)
 
 static int clk_fs660c32_dig_get_rate(unsigned long input,
-				struct stm_fs *fs, unsigned long *rate)
+				const struct stm_fs *fs, unsigned long *rate)
 {
 	unsigned long s = (1 << fs->sdiv);
 	unsigned long ns;
@@ -749,7 +892,7 @@
 {
 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
 	int (*clk_fs_get_rate)(unsigned long ,
-				struct stm_fs *, unsigned long *);
+				const struct stm_fs *, unsigned long *);
 	struct stm_fs prev_params;
 	unsigned long prev_rate, rate = 0;
 	unsigned long diff_rate, prev_diff_rate = ~0;
@@ -793,7 +936,7 @@
 	unsigned long rate = 0;
 	struct stm_fs params;
 	int (*clk_fs_get_rate)(unsigned long ,
-				struct stm_fs *, unsigned long *);
+				const struct stm_fs *, unsigned long *);
 
 	clk_fs_get_rate = fs->data->get_rate;
 
@@ -917,19 +1060,27 @@
 static struct of_device_id quadfs_of_match[] = {
 	{
 		.compatible = "st,stih416-quadfs216",
-		.data = (void *)&st_fs216c65_416
+		.data = &st_fs216c65_416
 	},
 	{
 		.compatible = "st,stih416-quadfs432",
-		.data = (void *)&st_fs432c65_416
+		.data = &st_fs432c65_416
 	},
 	{
 		.compatible = "st,stih416-quadfs660-E",
-		.data = (void *)&st_fs660c32_E_416
+		.data = &st_fs660c32_E_416
 	},
 	{
 		.compatible = "st,stih416-quadfs660-F",
-		.data = (void *)&st_fs660c32_F_416
+		.data = &st_fs660c32_F_416
+	},
+	{
+		.compatible = "st,stih407-quadfs660-C",
+		.data = &st_fs660c32_C_407
+	},
+	{
+		.compatible = "st,stih407-quadfs660-D",
+		.data = &st_fs660c32_D_407
 	},
 	{}
 };
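
The new powerup_polarity and standby_polarity flags exist because the stih407
quadfs blocks invert the sense of the npda/nsb bits relative to stih416, so
the shared ops now write !polarity to power up and polarity to power down. A
stripped-down sketch of that pattern (plain C, the register modelled as a
word, bit positions illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Write the "powered up" value of a one-bit field, honouring polarity:
 * polarity 0 -> 1 powers up (stih416 style), polarity 1 -> 0 powers up
 * (stih407 style), mirroring CLKGEN_WRITE(pll, npda, !polarity).
 */
static unsigned int power_up(unsigned int reg, unsigned int bit, bool polarity)
{
	if (!polarity)
		return reg | (1u << bit);
	return reg & ~(1u << bit);
}

int main(void)
{
	printf("stih416-style: 0x%x\n", power_up(0, 14, false));	/* sets the bit */
	printf("stih407-style: 0x%x\n", power_up(1u << 12, 12, true));	/* clears it */
	return 0;
}
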
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index a329906..79dc40b 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -580,6 +580,11 @@
 	.shift = 0,
 	.width = 2,
 };
+static struct clkgen_mux_data stih407_a9_mux_data = {
+	.offset = 0x1a4,
+	.shift = 1,
+	.width = 2,
+};
 
 static struct of_device_id mux_of_match[] = {
 	{
@@ -610,6 +615,10 @@
 		.compatible = "st,stih416-clkgen-a9-mux",
 		.data = &stih416_a9_mux_data,
 	},
+	{
+		.compatible = "st,stih407-clkgen-a9-mux",
+		.data = &stih407_a9_mux_data,
+	},
 	{}
 };
 
@@ -765,7 +774,8 @@
 		div->reg = reg + VCC_DIV_OFFSET;
 		div->shift = 2 * i;
 		div->width = 2;
-		div->flags = CLK_DIVIDER_POWER_OF_TWO;
+		div->flags = CLK_DIVIDER_POWER_OF_TWO |
+			CLK_DIVIDER_ROUND_CLOSEST;
 
 		mux->reg = reg + VCC_MUX_OFFSET;
 		mux->shift = 2 * i;
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index d8b9b1a..29769d7 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -59,7 +59,7 @@
 static const struct clk_ops stm_pll3200c32_ops;
 static const struct clk_ops st_pll1200c32_ops;
 
-static struct clkgen_pll_data st_pll1600c65_ax = {
+static const struct clkgen_pll_data st_pll1600c65_ax = {
 	.pdn_status	= CLKGEN_FIELD(0x0, 0x1,			19),
 	.locked_status	= CLKGEN_FIELD(0x0, 0x1,			31),
 	.mdiv		= CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK,	0),
@@ -67,7 +67,7 @@
 	.ops		= &st_pll1600c65_ops
 };
 
-static struct clkgen_pll_data st_pll800c65_ax = {
+static const struct clkgen_pll_data st_pll800c65_ax = {
 	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			19),
 	.locked_status	= CLKGEN_FIELD(0x0,	0x1,			31),
 	.mdiv		= CLKGEN_FIELD(0x0,	C65_MDIV_PLL800_MASK,	0),
@@ -76,7 +76,7 @@
 	.ops		= &st_pll800c65_ops
 };
 
-static struct clkgen_pll_data st_pll3200c32_a1x_0 = {
+static const struct clkgen_pll_data st_pll3200c32_a1x_0 = {
 	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			31),
 	.locked_status	= CLKGEN_FIELD(0x4,	0x1,			31),
 	.ndiv		= CLKGEN_FIELD(0x0,	C32_NDIV_MASK,		0x0),
@@ -93,7 +93,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll3200c32_a1x_1 = {
+static const struct clkgen_pll_data st_pll3200c32_a1x_1 = {
 	.pdn_status	= CLKGEN_FIELD(0xC,	0x1,			31),
 	.locked_status	= CLKGEN_FIELD(0x10,	0x1,			31),
 	.ndiv		= CLKGEN_FIELD(0xC,	C32_NDIV_MASK,		0x0),
@@ -111,7 +111,7 @@
 };
 
 /* 415 specific */
-static struct clkgen_pll_data st_pll3200c32_a9_415 = {
+static const struct clkgen_pll_data st_pll3200c32_a9_415 = {
 	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
 	.locked_status	= CLKGEN_FIELD(0x6C,	0x1,			0),
 	.ndiv		= CLKGEN_FIELD(0x0,	C32_NDIV_MASK,		9),
@@ -122,7 +122,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll3200c32_ddr_415 = {
+static const struct clkgen_pll_data st_pll3200c32_ddr_415 = {
 	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
 	.locked_status	= CLKGEN_FIELD(0x100,	0x1,			0),
 	.ndiv		= CLKGEN_FIELD(0x8,	C32_NDIV_MASK,		0),
@@ -135,7 +135,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll1200c32_gpu_415 = {
+static const struct clkgen_pll_data st_pll1200c32_gpu_415 = {
 	.pdn_status	= CLKGEN_FIELD(0x144,	0x1,			3),
 	.locked_status	= CLKGEN_FIELD(0x168,	0x1,			0),
 	.ldf		= CLKGEN_FIELD(0x0,	C32_LDF_MASK,		3),
@@ -146,7 +146,7 @@
 };
 
 /* 416 specific */
-static struct clkgen_pll_data st_pll3200c32_a9_416 = {
+static const struct clkgen_pll_data st_pll3200c32_a9_416 = {
 	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
 	.locked_status	= CLKGEN_FIELD(0x6C,	0x1,			0),
 	.ndiv		= CLKGEN_FIELD(0x8,	C32_NDIV_MASK,		0),
@@ -157,7 +157,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll3200c32_ddr_416 = {
+static const struct clkgen_pll_data st_pll3200c32_ddr_416 = {
 	.pdn_status	= CLKGEN_FIELD(0x0,	0x1,			0),
 	.locked_status	= CLKGEN_FIELD(0x10C,	0x1,			0),
 	.ndiv		= CLKGEN_FIELD(0x8,	C32_NDIV_MASK,		0),
@@ -170,7 +170,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll1200c32_gpu_416 = {
+static const struct clkgen_pll_data st_pll1200c32_gpu_416 = {
 	.pdn_status	= CLKGEN_FIELD(0x8E4,	0x1,			3),
 	.locked_status	= CLKGEN_FIELD(0x90C,	0x1,			0),
 	.ldf		= CLKGEN_FIELD(0x0,	C32_LDF_MASK,		3),
@@ -180,6 +180,54 @@
 	.ops		= &st_pll1200c32_ops,
 };
 
+static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
+	/* 407 A0 */
+	.pdn_status	= CLKGEN_FIELD(0x2a0,	0x1,			8),
+	.locked_status	= CLKGEN_FIELD(0x2a0,	0x1,			24),
+	.ndiv		= CLKGEN_FIELD(0x2a4,	C32_NDIV_MASK,		16),
+	.idf		= CLKGEN_FIELD(0x2a4,	C32_IDF_MASK,		0x0),
+	.num_odfs = 1,
+	.odf		= { CLKGEN_FIELD(0x2b4, C32_ODF_MASK,		0) },
+	.odf_gate	= { CLKGEN_FIELD(0x2b4,	0x1,			6) },
+	.ops		= &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = {
+	/* 407 C0 PLL0 */
+	.pdn_status	= CLKGEN_FIELD(0x2a0,	0x1,			8),
+	.locked_status	= CLKGEN_FIELD(0x2a0,	0x1,			24),
+	.ndiv		= CLKGEN_FIELD(0x2a4,	C32_NDIV_MASK,		16),
+	.idf		= CLKGEN_FIELD(0x2a4,	C32_IDF_MASK,		0x0),
+	.num_odfs = 1,
+	.odf		= { CLKGEN_FIELD(0x2b4, C32_ODF_MASK,		0) },
+	.odf_gate	= { CLKGEN_FIELD(0x2b4, 0x1,			6) },
+	.ops		= &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = {
+	/* 407 C0 PLL1 */
+	.pdn_status	= CLKGEN_FIELD(0x2c8,	0x1,			8),
+	.locked_status	= CLKGEN_FIELD(0x2c8,	0x1,			24),
+	.ndiv		= CLKGEN_FIELD(0x2cc,	C32_NDIV_MASK,		16),
+	.idf		= CLKGEN_FIELD(0x2cc,	C32_IDF_MASK,		0x0),
+	.num_odfs = 1,
+	.odf		= { CLKGEN_FIELD(0x2dc, C32_ODF_MASK,		0) },
+	.odf_gate	= { CLKGEN_FIELD(0x2dc, 0x1,			6) },
+	.ops		= &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_a9 = {
+	/* 407 A9 */
+	.pdn_status	= CLKGEN_FIELD(0x1a8,	0x1,			0),
+	.locked_status	= CLKGEN_FIELD(0x87c,	0x1,			0),
+	.ndiv		= CLKGEN_FIELD(0x1b0,	C32_NDIV_MASK,		0),
+	.idf		= CLKGEN_FIELD(0x1a8,	C32_IDF_MASK,		25),
+	.num_odfs = 1,
+	.odf		= { CLKGEN_FIELD(0x1b0, C32_ODF_MASK,		8) },
+	.odf_gate	= { CLKGEN_FIELD(0x1ac, 0x1,			28) },
+	.ops		= &stm_pll3200c32_ops,
+};
+
 /**
  * DOC: Clock Generated by PLL, rate set and enabled by bootloader
  *
@@ -450,9 +498,8 @@
 	 * PLL0 HS (high speed) output
 	 */
 	clk_data->clks[0] = clkgen_pll_register(parent_name,
-						&st_pll1600c65_ax,
-						reg + CLKGENAx_PLL0_OFFSET,
-						clk_name);
+			(struct clkgen_pll_data *) &st_pll1600c65_ax,
+			reg + CLKGENAx_PLL0_OFFSET, clk_name);
 
 	if (IS_ERR(clk_data->clks[0]))
 		goto err;
@@ -480,9 +527,8 @@
 	 * PLL1 output
 	 */
 	clk_data->clks[2] = clkgen_pll_register(parent_name,
-						&st_pll800c65_ax,
-						reg + CLKGENAx_PLL1_OFFSET,
-						clk_name);
+			(struct clkgen_pll_data *) &st_pll800c65_ax,
+			reg + CLKGENAx_PLL1_OFFSET, clk_name);
 
 	if (IS_ERR(clk_data->clks[2]))
 		goto err;
@@ -572,6 +618,22 @@
 		.compatible = "st,stih416-plls-c32-ddr",
 		.data = &st_pll3200c32_ddr_416,
 	},
+	{
+		.compatible = "st,stih407-plls-c32-a0",
+		.data = &st_pll3200c32_407_a0,
+	},
+	{
+		.compatible = "st,stih407-plls-c32-c0_0",
+		.data = &st_pll3200c32_407_c0_0,
+	},
+	{
+		.compatible = "st,stih407-plls-c32-c0_1",
+		.data = &st_pll3200c32_407_c0_1,
+	},
+	{
+		.compatible = "st,stih407-plls-c32-a9",
+		.data = &st_pll3200c32_407_a9,
+	},
 	{}
 };
 
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 762fd64..6850cba 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -6,4 +6,6 @@
 obj-y += clk-a10-hosc.o
 obj-y += clk-a20-gmac.o
 
-obj-$(CONFIG_MFD_SUN6I_PRCM) += clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o
+obj-$(CONFIG_MFD_SUN6I_PRCM) += \
+	clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
+	clk-sun8i-apb0.o
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 633ddc4..5296fd6 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -60,7 +60,7 @@
 	struct clk_gate *gate;
 	const char *clk_name = node->name;
 	const char *parents[SUN7I_A20_GMAC_PARENTS];
-	void *reg;
+	void __iomem *reg;
 
 	if (of_property_read_string(node, "clock-output-names", &clk_name))
 		return;
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 3806d97..2057c8a 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -62,7 +62,7 @@
 		p = FACTOR_GET(config->pshift, config->pwidth, reg);
 
 	/* Calculate the rate */
-	rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
+	rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
 
 	return rate;
 }
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 02e1a43..d2d0efa 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -15,6 +15,7 @@
 	u8 mwidth;
 	u8 pshift;
 	u8 pwidth;
+	u8 n_start;
 };
 
 struct clk_factors {
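
With the n_start field added here, the factor clock recalc in clk-factors.c
becomes rate = (parent_rate * (n + n_start) * (k + 1) >> p) / (m + 1); existing
users keep n_start = 0, while the A23 PLL1 sets n_start = 1 because its N field
is biased by one. A quick arithmetic check (register values chosen for
illustration only):

#include <stdio.h>

/* Rate formula from clk_factors_recalc_rate(), including the n_start bias. */
static unsigned long factors_rate(unsigned long parent, unsigned int n,
				  unsigned int n_start, unsigned int k,
				  unsigned int m, unsigned int p)
{
	return (parent * (n + n_start) * (k + 1) >> p) / (m + 1);
}

int main(void)
{
	/* 24 MHz parent, n=41, k=0, m=0, p=0: the biased N gives 1008 MHz */
	printf("n_start=1: %lu Hz\n", factors_rate(24000000, 41, 1, 0, 0, 0));
	/* the same register values read 24 MHz low without the bias */
	printf("n_start=0: %lu Hz\n", factors_rate(24000000, 41, 0, 0, 0, 0));
	return 0;
}
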
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 670f90d..e10d052 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -9,23 +9,53 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/clkdev.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #define SUN6I_APB0_GATES_MAX_SIZE	32
 
+struct gates_data {
+	DECLARE_BITMAP(mask, SUN6I_APB0_GATES_MAX_SIZE);
+};
+
+static const struct gates_data sun6i_a31_apb0_gates __initconst = {
+	.mask = {0x7F},
+};
+
+static const struct gates_data sun8i_a23_apb0_gates __initconst = {
+	.mask = {0x5D},
+};
+
+static const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
+	{ .compatible = "allwinner,sun6i-a31-apb0-gates-clk", .data = &sun6i_a31_apb0_gates },
+	{ .compatible = "allwinner,sun8i-a23-apb0-gates-clk", .data = &sun8i_a23_apb0_gates },
+	{ /* sentinel */ }
+};
+
 static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct clk_onecell_data *clk_data;
+	const struct of_device_id *device;
+	const struct gates_data *data;
 	const char *clk_parent;
 	const char *clk_name;
 	struct resource *r;
 	void __iomem *reg;
-	int gate_id;
 	int ngates;
 	int i;
+	int j = 0;
+
+	if (!np)
+		return -ENODEV;
+
+	device = of_match_device(sun6i_a31_apb0_gates_clk_dt_ids, &pdev->dev);
+	if (!device)
+		return -ENODEV;
+	data = device->data;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	reg = devm_ioremap_resource(&pdev->dev, r);
@@ -36,54 +66,36 @@
 	if (!clk_parent)
 		return -EINVAL;
 
-	ngates = of_property_count_strings(np, "clock-output-names");
-	if (ngates < 0)
-		return ngates;
-
-	if (!ngates || ngates > SUN6I_APB0_GATES_MAX_SIZE)
-		return -EINVAL;
-
 	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
 				GFP_KERNEL);
 	if (!clk_data)
 		return -ENOMEM;
 
-	clk_data->clks = devm_kzalloc(&pdev->dev,
-				      SUN6I_APB0_GATES_MAX_SIZE *
-				      sizeof(struct clk *),
-				      GFP_KERNEL);
+	/* Worst-case size approximation and memory allocation */
+	ngates = find_last_bit(data->mask, SUN6I_APB0_GATES_MAX_SIZE);
+	clk_data->clks = devm_kcalloc(&pdev->dev, (ngates + 1),
+				      sizeof(struct clk *), GFP_KERNEL);
 	if (!clk_data->clks)
 		return -ENOMEM;
 
-	for (i = 0; i < ngates; i++) {
+	for_each_set_bit(i, data->mask, SUN6I_APB0_GATES_MAX_SIZE) {
 		of_property_read_string_index(np, "clock-output-names",
-					      i, &clk_name);
+					      j, &clk_name);
 
-		gate_id = i;
-		of_property_read_u32_index(np, "clock-indices", i, &gate_id);
+		clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
+						      clk_parent, 0, reg, i,
+						      0, NULL);
+		WARN_ON(IS_ERR(clk_data->clks[i]));
+		clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
 
-		WARN_ON(gate_id >= SUN6I_APB0_GATES_MAX_SIZE);
-		if (gate_id >= SUN6I_APB0_GATES_MAX_SIZE)
-			continue;
-
-		clk_data->clks[gate_id] = clk_register_gate(&pdev->dev,
-							    clk_name,
-							    clk_parent, 0,
-							    reg, gate_id,
-							    0, NULL);
-		WARN_ON(IS_ERR(clk_data->clks[gate_id]));
+		j++;
 	}
 
-	clk_data->clk_num = ngates;
+	clk_data->clk_num = ngates + 1;
 
 	return of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
 }
 
-const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
-	{ .compatible = "allwinner,sun6i-a31-apb0-gates-clk" },
-	{ /* sentinel */ }
-};
-
 static struct platform_driver sun6i_a31_apb0_gates_clk_driver = {
 	.driver = {
 		.name = "sun6i-a31-apb0-gates-clk",
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
index 11f17c3..1fa23371 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -57,7 +57,7 @@
 	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
+static const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
 	{ .compatible = "allwinner,sun6i-a31-apb0-clk" },
 	{ /* sentinel */ }
 };
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index f73cc05..eca8ca0 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-struct clk_ops ar100_ops = {
+static struct clk_ops ar100_ops = {
 	.recalc_rate = ar100_recalc_rate,
 	.determine_rate = ar100_determine_rate,
 	.set_parent = ar100_set_parent,
@@ -213,7 +213,7 @@
 	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
+static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
 	{ .compatible = "allwinner,sun6i-a31-ar100-clk" },
 	{ /* sentinel */ }
 };
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
new file mode 100644
index 0000000..1f5ba9b
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 Chen-Yu Tsai
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * Allwinner A23 APB0 clock driver
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Based on clk-sun6i-apb0.c
+ * Allwinner A31 APB0 clock driver
+ *
+ * Copyright (C) 2014 Free Electrons
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static int sun8i_a23_apb0_clk_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const char *clk_name = np->name;
+	const char *clk_parent;
+	struct resource *r;
+	void __iomem *reg;
+	struct clk *clk;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	reg = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	clk_parent = of_clk_get_parent_name(np, 0);
+	if (!clk_parent)
+		return -EINVAL;
+
+	of_property_read_string(np, "clock-output-names", &clk_name);
+
+	/* The A23 APB0 clock is a standard 2 bit wide divider clock */
+	clk = clk_register_divider(&pdev->dev, clk_name, clk_parent, 0, reg,
+				   0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static const struct of_device_id sun8i_a23_apb0_clk_dt_ids[] = {
+	{ .compatible = "allwinner,sun8i-a23-apb0-clk" },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver sun8i_a23_apb0_clk_driver = {
+	.driver = {
+		.name = "sun8i-a23-apb0-clk",
+		.owner = THIS_MODULE,
+		.of_match_table = sun8i_a23_apb0_clk_dt_ids,
+	},
+	.probe = sun8i_a23_apb0_clk_probe,
+};
+module_platform_driver(sun8i_a23_apb0_clk_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner A23 APB0 clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index fb2ce84..b654b7b 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -164,6 +164,54 @@
 }
 
 /**
+ * sun8i_a23_get_pll1_factors() - calculates n, k, m, p factors for PLL1
+ * PLL1 rate is calculated as follows
+ * rate = (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1);
+ * parent_rate is always 24MHz
+ */
+
+static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
+				   u8 *n, u8 *k, u8 *m, u8 *p)
+{
+	u8 div;
+
+	/* Normalize value to a 6M multiple */
+	div = *freq / 6000000;
+	*freq = 6000000 * div;
+
+	/* we were called to round the frequency, we can now return */
+	if (n == NULL)
+		return;
+
+	/* m is always zero for pll1 */
+	*m = 0;
+
+	/* k is 1 only on these cases */
+	if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
+		*k = 1;
+	else
+		*k = 0;
+
+	/* p will be 2 for divs under 20 and odd divs under 32 */
+	if (div < 20 || (div < 32 && (div & 1)))
+		*p = 2;
+
+	/* p will be 1 for even divs under 32, divs under 40 and odd pairs
+	 * of divs between 40-62 */
+	else if (div < 40 || (div < 64 && (div & 2)))
+		*p = 1;
+
+	/* any other entries have p = 0 */
+	else
+		*p = 0;
+
+	/* calculate a suitable n based on k and p */
+	div <<= *p;
+	div /= (*k + 1);
+	*n = div / 4 - 1;
+}
+
+/**
  * sun4i_get_pll5_factors() - calculates n, k factors for PLL5
  * PLL5 rate is calculated as follows
  * rate = parent_rate * n * (k + 1)
@@ -422,6 +470,18 @@
 	.mwidth = 2,
 };
 
+static struct clk_factors_config sun8i_a23_pll1_config = {
+	.nshift = 8,
+	.nwidth = 5,
+	.kshift = 4,
+	.kwidth = 2,
+	.mshift = 0,
+	.mwidth = 2,
+	.pshift = 16,
+	.pwidth = 2,
+	.n_start = 1,
+};
+
 static struct clk_factors_config sun4i_pll5_config = {
 	.nshift = 8,
 	.nwidth = 5,
@@ -471,6 +531,12 @@
 	.getter = sun6i_a31_get_pll1_factors,
 };
 
+static const struct factors_data sun8i_a23_pll1_data __initconst = {
+	.enable = 31,
+	.table = &sun8i_a23_pll1_config,
+	.getter = sun8i_a23_get_pll1_factors,
+};
+
 static const struct factors_data sun7i_a20_pll4_data __initconst = {
 	.enable = 31,
 	.table = &sun4i_pll5_config,
@@ -527,7 +593,7 @@
 	struct clk_hw *mux_hw = NULL;
 	const char *clk_name = node->name;
 	const char *parents[SUNXI_MAX_PARENTS];
-	void *reg;
+	void __iomem *reg;
 	int i = 0;
 
 	reg = of_iomap(node, 0);
@@ -632,7 +698,7 @@
 	struct clk *clk;
 	const char *clk_name = node->name;
 	const char *parents[SUNXI_MAX_PARENTS];
-	void *reg;
+	void __iomem *reg;
 	int i = 0;
 
 	reg = of_iomap(node, 0);
@@ -664,6 +730,7 @@
 	u8	shift;
 	u8	pow;
 	u8	width;
+	const struct clk_div_table *table;
 };
 
 static const struct div_data sun4i_axi_data __initconst = {
@@ -672,6 +739,23 @@
 	.width	= 2,
 };
 
+static const struct clk_div_table sun8i_a23_axi_table[] __initconst = {
+	{ .val = 0, .div = 1 },
+	{ .val = 1, .div = 2 },
+	{ .val = 2, .div = 3 },
+	{ .val = 3, .div = 4 },
+	{ .val = 4, .div = 4 },
+	{ .val = 5, .div = 4 },
+	{ .val = 6, .div = 4 },
+	{ .val = 7, .div = 4 },
+	{ } /* sentinel */
+};
+
+static const struct div_data sun8i_a23_axi_data __initconst = {
+	.width	= 3,
+	.table	= sun8i_a23_axi_table,
+};
+
 static const struct div_data sun4i_ahb_data __initconst = {
 	.shift	= 4,
 	.pow	= 1,
@@ -696,7 +780,7 @@
 	struct clk *clk;
 	const char *clk_name = node->name;
 	const char *clk_parent;
-	void *reg;
+	void __iomem *reg;
 
 	reg = of_iomap(node, 0);
 
@@ -704,10 +788,10 @@
 
 	of_property_read_string(node, "clock-output-names", &clk_name);
 
-	clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
-				   reg, data->shift, data->width,
-				   data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
-				   &clk_lock);
+	clk = clk_register_divider_table(NULL, clk_name, clk_parent, 0,
+					 reg, data->shift, data->width,
+					 data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
+					 data->table, &clk_lock);
 	if (clk) {
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 		clk_register_clkdev(clk, clk_name, NULL);
@@ -804,6 +888,10 @@
 	.mask = { 0x12f77fff, 0x16ff3f },
 };
 
+static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
+	.mask = {0x25386742, 0x2505111},
+};
+
 static const struct gates_data sun4i_apb0_gates_data __initconst = {
 	.mask = {0x4EF},
 };
@@ -836,6 +924,10 @@
 	.mask = {0x3031},
 };
 
+static const struct gates_data sun8i_a23_apb1_gates_data __initconst = {
+	.mask = {0x3021},
+};
+
 static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
 	.mask = {0x3F000F},
 };
@@ -844,6 +936,10 @@
 	.mask = { 0xff80ff },
 };
 
+static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
+	.mask = {0x1F0007},
+};
+
 static const struct gates_data sun4i_a10_usb_gates_data __initconst = {
 	.mask = {0x1C0},
 	.reset_mask = 0x07,
@@ -866,11 +962,10 @@
 	struct gates_reset_data *reset_data;
 	const char *clk_parent;
 	const char *clk_name;
-	void *reg;
+	void __iomem *reg;
 	int qty;
 	int i = 0;
 	int j = 0;
-	int ignore;
 
 	reg = of_iomap(node, 0);
 
@@ -891,14 +986,12 @@
 		of_property_read_string_index(node, "clock-output-names",
 					      j, &clk_name);
 
-		/* No driver claims this clock, but it should remain gated */
-		ignore = !strcmp("ahb_sdram", clk_name) ? CLK_IGNORE_UNUSED : 0;
-
 		clk_data->clks[i] = clk_register_gate(NULL, clk_name,
-						      clk_parent, ignore,
+						      clk_parent, 0,
 						      reg + 4 * (i/32), i % 32,
 						      0, &clk_lock);
 		WARN_ON(IS_ERR(clk_data->clks[i]));
+		clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
 
 		j++;
 	}
@@ -991,7 +1084,7 @@
 	struct clk_gate *gate = NULL;
 	struct clk_fixed_factor *fix_factor;
 	struct clk_divider *divider;
-	void *reg;
+	void __iomem *reg;
 	int i = 0;
 	int flags, clkflags;
 
@@ -1102,6 +1195,7 @@
 static const struct of_device_id clk_factors_match[] __initconst = {
 	{.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
 	{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
+	{.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
 	{.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
 	{.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
 	{.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
@@ -1113,6 +1207,7 @@
 /* Matches for divider clocks */
 static const struct of_device_id clk_div_match[] __initconst = {
 	{.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
+	{.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
 	{.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
 	{.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
 	{.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
@@ -1142,6 +1237,7 @@
 	{.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
 	{.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
 	{.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
+	{.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
 	{.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
 	{.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
 	{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
@@ -1151,7 +1247,9 @@
 	{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
 	{.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
 	{.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
+	{.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
 	{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
+	{.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
 	{.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
 	{.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
 	{.compatible = "allwinner,sun6i-a31-usb-clk", .data = &sun6i_a31_usb_gates_data,},
@@ -1202,6 +1300,7 @@
 
 static const char *sun4i_a10_critical_clocks[] __initdata = {
 	"pll5_ddr",
+	"ahb_sdram",
 };
 
 static void __init sun4i_a10_init_clocks(struct device_node *node)
@@ -1214,6 +1313,7 @@
 static const char *sun5i_critical_clocks[] __initdata = {
 	"mbus",
 	"pll5_ddr",
+	"ahb_sdram",
 };
 
 static void __init sun5i_init_clocks(struct device_node *node)
@@ -1236,3 +1336,4 @@
 			  ARRAY_SIZE(sun6i_critical_clocks));
 }
 CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
+CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
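
sun8i_a23_get_pll1_factors() above first snaps the request to the 6 MHz grid,
then picks p from the normalized divider, folds k in, and solves for n. A
compact userspace re-check of that arithmetic (it mirrors the kernel function
but omits the early "round only" return taken when n is NULL; the target list
is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Mirror of sun8i_a23_get_pll1_factors(); parent fixed at 24 MHz. */
static void a23_pll1_factors(uint32_t *freq, uint8_t *n, uint8_t *k,
			     uint8_t *m, uint8_t *p)
{
	uint8_t div = *freq / 6000000;

	*freq = 6000000 * div;
	*m = 0;
	*k = (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000);

	if (div < 20 || (div < 32 && (div & 1)))
		*p = 2;
	else if (div < 40 || (div < 64 && (div & 2)))
		*p = 1;
	else
		*p = 0;

	div <<= *p;
	div /= (*k + 1);
	*n = div / 4 - 1;
}

int main(void)
{
	uint32_t targets[] = { 1008000000, 816000000, 408000000, 120000000 };

	for (unsigned int i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		uint32_t f = targets[i];
		uint8_t n, k, m, p;

		a23_pll1_factors(&f, &n, &k, &m, &p);
		/* re-check with the recalc formula and n_start = 1 */
		unsigned long long rate = ((unsigned long long)24000000 *
					   (n + 1) * (k + 1) >> p) / (m + 1);
		printf("target %u -> n=%u k=%u m=%u p=%u -> %llu Hz\n",
		       targets[i], n, k, m, p, rate);
	}
	return 0;
}
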
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 637b62c..c7c6d8f 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -110,6 +110,12 @@
 #define XUSBIO_PLL_CFG0_SEQ_ENABLE		BIT(24)
 #define XUSBIO_PLL_CFG0_SEQ_START_STATE		BIT(25)
 
+#define SATA_PLL_CFG0		0x490
+#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL	BIT(0)
+#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET	BIT(2)
+#define SATA_PLL_CFG0_SEQ_ENABLE		BIT(24)
+#define SATA_PLL_CFG0_SEQ_START_STATE		BIT(25)
+
 #define PLLE_MISC_PLLE_PTS	BIT(8)
 #define PLLE_MISC_IDDQ_SW_VALUE	BIT(13)
 #define PLLE_MISC_IDDQ_SW_CTRL	BIT(14)
@@ -1361,6 +1367,19 @@
 	val |= XUSBIO_PLL_CFG0_SEQ_ENABLE;
 	pll_writel(val, XUSBIO_PLL_CFG0, pll);
 
+	/* Enable hw control of SATA pll */
+	val = pll_readl(SATA_PLL_CFG0, pll);
+	val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
+	val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET;
+	val |= SATA_PLL_CFG0_SEQ_START_STATE;
+	pll_writel(val, SATA_PLL_CFG0, pll);
+
+	udelay(1);
+
+	val = pll_readl(SATA_PLL_CFG0, pll);
+	val |= SATA_PLL_CFG0_SEQ_ENABLE;
+	pll_writel(val, SATA_PLL_CFG0, pll);
+
 out:
 	if (pll->lock)
 		spin_unlock_irqrestore(pll->lock, flags);
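
Like the XUSBIO PLL just above it, the SATA pad PLL is handed to the hardware
sequencer in two steps: one write selects lock-detect control and the start
state, then after a 1 us settle the SEQ_ENABLE bit is set in a separate write.
A stripped-down sketch of that ordering, with the MMIO access stubbed out:

#include <stdio.h>
#include <stdint.h>

#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL	(1u << 0)
#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET	(1u << 2)
#define SATA_PLL_CFG0_SEQ_ENABLE		(1u << 24)
#define SATA_PLL_CFG0_SEQ_START_STATE		(1u << 25)

static uint32_t cfg0;				/* stand-in for the MMIO register */
static uint32_t rd(void) { return cfg0; }
static void wr(uint32_t v) { cfg0 = v; }
static void settle(void) { /* udelay(1) in the driver */ }

static void sata_pll_hw_control(void)
{
	uint32_t val = rd();

	/* step 1: release SW reset control, use lock detect, set start state */
	val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
	val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET | SATA_PLL_CFG0_SEQ_START_STATE;
	wr(val);

	settle();

	/* step 2: only then let the sequencer take over */
	wr(rd() | SATA_PLL_CFG0_SEQ_ENABLE);
}

int main(void)
{
	sata_pll_hw_control();
	printf("SATA_PLL_CFG0 = 0x%x\n", cfg0);
	return 0;
}
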
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index adf6b81..37f32c4 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -469,7 +469,7 @@
 	MUX("sata", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SATA, 124, TEGRA_PERIPH_ON_APB, tegra_clk_sata),
 	MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
 	MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
-	MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+	MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
 	MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
 	MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
 	MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
@@ -487,7 +487,7 @@
 	MUX8("extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, 0, tegra_clk_extern2),
 	MUX8("extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, 0, tegra_clk_extern3),
 	MUX8("soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, TEGRA_PERIPH_ON_APB, tegra_clk_soc_therm),
-	MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8),
+	MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 164, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8),
 	MUX8("isp", mux_pllm_pllc_pllp_plla_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_8),
 	MUX8("entropy", mux_pllp_clkm1, CLK_SOURCE_ENTROPY, 149,  0, tegra_clk_entropy),
 	MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio),
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index b9c8ba2..f760f31 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -151,6 +151,13 @@
 /* Tegra CPU clock and reset control regs */
 #define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS	0x470
 
+#define MUX8(_name, _parents, _offset, \
+			     _clk_num, _gate_flags, _clk_id)	\
+	TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+			29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+			_clk_num, _gate_flags, _clk_id, _parents##_idx, 0,\
+			NULL)
+
 #ifdef CONFIG_PM_SLEEP
 static struct cpu_clk_suspend_context {
 	u32 clk_csite_src;
@@ -777,7 +784,6 @@
 	[tegra_clk_spdif_in] = { .dt_id = TEGRA114_CLK_SPDIF_IN, .present = true },
 	[tegra_clk_spdif_out] = { .dt_id = TEGRA114_CLK_SPDIF_OUT, .present = true },
 	[tegra_clk_vi_8] = { .dt_id = TEGRA114_CLK_VI, .present = true },
-	[tegra_clk_vi_sensor_8] = { .dt_id = TEGRA114_CLK_VI_SENSOR, .present = true },
 	[tegra_clk_fuse] = { .dt_id = TEGRA114_CLK_FUSE, .present = true },
 	[tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true },
 	[tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true },
@@ -923,6 +929,13 @@
 	{ .dev_id = "timer", .dt_id = TEGRA114_CLK_TIMER },
 };
 
+static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
+	"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
+};
+static u32 mux_pllm_pllc2_c_c3_pllp_plla_idx[] = {
+	[0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
+};
+
 static struct clk **clks;
 
 static unsigned long osc_freq;
@@ -1178,10 +1191,18 @@
 	clks[TEGRA114_CLK_PLL_E_OUT0] = clk;
 }
 
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+	MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, TEGRA114_CLK_VI_SENSOR),
+};
+
 static __init void tegra114_periph_clk_init(void __iomem *clk_base,
 					    void __iomem *pmc_base)
 {
 	struct clk *clk;
+	struct tegra_periph_init_data *data;
+	int i;
 
 	/* xusb_ss_div2 */
 	clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -1209,6 +1230,14 @@
 			       clk_base + CLK_SOURCE_EMC,
 			       29, 3, 0, NULL);
 
+	for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+		data = &tegra_periph_clk_list[i];
+		clk = tegra_clk_register_periph(data->name,
+			data->p.parent_names, data->num_parents,
+			&data->periph, clk_base, data->offset, data->flags);
+		clks[data->clk_id] = clk;
+	}
+
 	tegra_periph_clk_init(clk_base, pmc_base, tegra114_clks,
 				&pll_p_params);
 }
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 80efe51..9525c68 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -869,7 +869,7 @@
 	[tegra_clk_spdif_in] = { .dt_id = TEGRA124_CLK_SPDIF_IN, .present = true },
 	[tegra_clk_spdif_out] = { .dt_id = TEGRA124_CLK_SPDIF_OUT, .present = true },
 	[tegra_clk_vi_9] = { .dt_id = TEGRA124_CLK_VI, .present = true },
-	[tegra_clk_vi_sensor] = { .dt_id = TEGRA124_CLK_VI_SENSOR, .present = true },
+	[tegra_clk_vi_sensor_8] = { .dt_id = TEGRA124_CLK_VI_SENSOR, .present = true },
 	[tegra_clk_fuse] = { .dt_id = TEGRA124_CLK_FUSE, .present = true },
 	[tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true },
 	[tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true },
@@ -1369,6 +1369,14 @@
 	{TEGRA124_CLK_XUSB_HS_SRC, TEGRA124_CLK_PLL_U_60M, 60000000, 0},
 	{TEGRA124_CLK_XUSB_FALCON_SRC, TEGRA124_CLK_PLL_RE_OUT, 224000000, 0},
 	{TEGRA124_CLK_XUSB_HOST_SRC, TEGRA124_CLK_PLL_RE_OUT, 112000000, 0},
+	{TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
+	{TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
+	{TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
+	{TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+	{TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
+	{TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
+	{TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
+	{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
 	/* This MUST be the last entry. */
 	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index c0a7d77..bf452b6 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -277,6 +277,12 @@
 	for (i = 0; i < num; i++, dev_clks++)
 		clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
 				dev_clks->dev_id);
+
+	for (i = 0; i < clk_num; i++) {
+		if (!IS_ERR_OR_NULL(clks[i]))
+			clk_register_clkdev(clks[i], __clk_get_name(clks[i]),
+				"tegra-clk-debug");
+	}
 }
 
 struct clk ** __init tegra_lookup_dt_id(int clk_id,
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index e158133..62ac8f6 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -16,8 +16,9 @@
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
-#define DRA7_DPLL_ABE_DEFFREQ				361267200
+#define DRA7_DPLL_ABE_DEFFREQ				180633600
 #define DRA7_DPLL_GMAC_DEFFREQ				1000000000
+#define DRA7_DPLL_USB_DEFFREQ				960000000
 
 
 static struct ti_dt_clk dra7xx_clks[] = {
@@ -322,10 +323,25 @@
 	if (rc)
 		pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
+	dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+	rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
+	if (rc)
+		pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
+
 	dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
 	rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
 	if (rc)
 		pr_err("%s: failed to configure GMAC DPLL!\n", __func__);
 
+	dpll_ck = clk_get_sys(NULL, "dpll_usb_ck");
+	rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ);
+	if (rc)
+		pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+	dpll_ck = clk_get_sys(NULL, "dpll_usb_m2_ck");
+	rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ/2);
+	if (rc)
+		pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
 	return rc;
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 02f177a..2fb0fdf 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@
 
 config CRYPTO_DEV_CCP
 	bool "Support for AMD Cryptographic Coprocessor"
-	depends on X86 && PCI
+	depends on (X86 && PCI) || ARM64
 	default n
 	help
 	  The AMD Cryptographic Coprocessor provides hardware support
@@ -418,4 +418,22 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mxs-dcp.
 
+source "drivers/crypto/qat/Kconfig"
+
+config CRYPTO_DEV_QCE
+	tristate "Qualcomm crypto engine accelerator"
+	depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_ECB
+	select CRYPTO_CBC
+	select CRYPTO_XTS
+	select CRYPTO_CTR
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This driver supports Qualcomm crypto engine accelerator
+	  hardware. To compile this driver as a module, choose M here. The
+	  module will be called qcrypto.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 482f090..3924f93 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -23,3 +23,5 @@
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 37f9cc9..e4c6c58 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,7 +1292,7 @@
 		.of_match_table = crypto4xx_match,
 	},
 	.probe		= crypto4xx_probe,
-	.remove		= crypto4xx_remove,
+	.remove		= __exit_p(crypto4xx_remove),
 };
 
 module_platform_driver(crypto4xx_driver);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 0618be0..9a4f69e 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1353,7 +1353,6 @@
 					GFP_KERNEL);
 	if (!pdata->dma_slave) {
 		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-		devm_kfree(&pdev->dev, pdata);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1375,7 +1374,8 @@
 	unsigned long sha_phys_size;
 	int err;
 
-	sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+	sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
+				GFP_KERNEL);
 	if (sha_dd == NULL) {
 		dev_err(dev, "unable to alloc data struct.\n");
 		err = -ENOMEM;
@@ -1490,8 +1490,6 @@
 	free_irq(sha_dd->irq, sha_dd);
 res_err:
 	tasklet_kill(&sha_dd->done_task);
-	kfree(sha_dd);
-	sha_dd = NULL;
 sha_dd_err:
 	dev_err(dev, "initialization failed.\n");
 
@@ -1523,9 +1521,6 @@
 	if (sha_dd->irq >= 0)
 		free_irq(sha_dd->irq, sha_dd);
 
-	kfree(sha_dd);
-	sha_dd = NULL;
-
 	return 0;
 }
 
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 6cde5b5..d3a9041 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1337,7 +1337,6 @@
 					GFP_KERNEL);
 	if (!pdata->dma_slave) {
 		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-		devm_kfree(&pdev->dev, pdata);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1359,7 +1358,7 @@
 	unsigned long tdes_phys_size;
 	int err;
 
-	tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
 	if (tdes_dd == NULL) {
 		dev_err(dev, "unable to alloc data struct.\n");
 		err = -ENOMEM;
@@ -1483,8 +1482,6 @@
 res_err:
 	tasklet_kill(&tdes_dd->done_task);
 	tasklet_kill(&tdes_dd->queue_task);
-	kfree(tdes_dd);
-	tdes_dd = NULL;
 tdes_dd_err:
 	dev_err(dev, "initialization failed.\n");
 
@@ -1519,9 +1516,6 @@
 	if (tdes_dd->irq >= 0)
 		free_irq(tdes_dd->irq, tdes_dd);
 
-	kfree(tdes_dd);
-	tdes_dd = NULL;
-
 	return 0;
 }
 
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c09ce1f..a80ea85 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -97,6 +97,13 @@
 {
 	u32 *jump_cmd, *uncond_jump_cmd;
 
+	/* DK bit is valid only for AES */
+	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+		append_operation(desc, type | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+		return;
+	}
+
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
 			 OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
@@ -1313,8 +1320,13 @@
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	/* Check if data are contiguous */
 	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Check if data are contiguous */
 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 	    iv_dma || src_nents || iv_dma + ivsize !=
 	    sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*all_contig_ptr = all_contig;
 
 	sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return edesc;
 }
@@ -1494,8 +1510,13 @@
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	/* Check if data are contiguous */
 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Check if data are contiguous */
 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
 		contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*contig_ptr = contig;
 
 	sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return edesc;
 }
@@ -1650,11 +1675,16 @@
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	/*
 	 * Check if iv can be contiguous with source and destination.
 	 * If so, include it. If not, create scatterlist.
 	 */
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
 		iv_contig = true;
 	else
@@ -1693,6 +1723,11 @@
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
@@ -2441,8 +2476,37 @@
 
 static int __init caam_algapi_init(void)
 {
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
 	int i = 0, err = 0;
 
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
 	INIT_LIST_HEAD(&alg_list);
 
 	/* register crypto algorithms the device supports */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0d9284e..b464d03 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -137,13 +137,20 @@
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
-				       struct caam_hash_state *state,
-				       int ctx_len)
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+				      struct caam_hash_state *state,
+				      int ctx_len)
 {
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
 					ctx_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		return -ENOMEM;
+	}
+
 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+	return 0;
 }
 
 /* Map req->result, and append seq_out_ptr command that points to it */
@@ -201,14 +208,19 @@
 }
 
 /* Map state->caam_ctx, and add it to link table */
-static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
-				      struct caam_hash_state *state,
-				      int ctx_len,
-				      struct sec4_sg_entry *sec4_sg,
-				      u32 flag)
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+				     struct caam_hash_state *state, int ctx_len,
+				     struct sec4_sg_entry *sec4_sg, u32 flag)
 {
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		return -ENOMEM;
+	}
+
 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+	return 0;
 }
 
 /* Common shared descriptor commands */
@@ -487,11 +499,11 @@
 			       digestsize, 1);
 #endif
 	}
-	*keylen = digestsize;
-
 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
 
+	*keylen = digestsize;
+
 	kfree(desc);
 
 	return ret;
@@ -706,7 +718,7 @@
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
 	kfree(edesc);
 
 #ifdef DEBUG
@@ -741,7 +753,7 @@
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
 	kfree(edesc);
 
 #ifdef DEBUG
@@ -808,12 +820,11 @@
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						     sec4_sg_bytes,
-						     DMA_TO_DEVICE);
 
-		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-				   edesc->sec4_sg, DMA_BIDIRECTIONAL);
+		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
+		if (ret)
+			return ret;
 
 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
 							edesc->sec4_sg + 1,
@@ -839,6 +850,14 @@
 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
 				     HDR_REVERSE);
 
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						     sec4_sg_bytes,
+						     DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 				       to_hash, LDST_SGF);
 
@@ -911,23 +930,34 @@
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->src_nents = 0;
 
-	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-			   DMA_TO_DEVICE);
+	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		return ret;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
 	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return -ENOMEM;
+	}
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
 			  LDST_SGF);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -989,11 +1019,11 @@
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 
-	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-			   DMA_TO_DEVICE);
+	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		return ret;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
@@ -1002,11 +1032,22 @@
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
 			   sec4_sg_src_index, chained);
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return -ENOMEM;
+	}
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 			       buflen + req->nbytes, LDST_SGF);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1056,8 +1097,7 @@
 	}
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			  DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->src_nents = src_nents;
 	edesc->chained = chained;
 
@@ -1067,6 +1107,12 @@
 
 	if (src_nents) {
 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
 		src_dma = edesc->sec4_sg_dma;
 		options = LDST_SGF;
 	} else {
@@ -1077,6 +1123,10 @@
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1125,11 +1175,19 @@
 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map src\n");
+		return -ENOMEM;
+	}
 
 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 	edesc->src_nents = 0;
 
 #ifdef DEBUG
@@ -1197,9 +1255,7 @@
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
+		edesc->dst_dma = 0;
 
 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 						    buf, *buflen);
@@ -1216,9 +1272,19 @@
 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
 				     HDR_REVERSE);
 
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
 
-		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			return ret;
 
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1297,8 +1363,6 @@
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
 						state->buf_dma, buflen,
@@ -1307,11 +1371,22 @@
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
 			   chained);
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return -ENOMEM;
+	}
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
 			       req->nbytes, LDST_SGF);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1380,13 +1455,19 @@
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
+		edesc->dst_dma = 0;
 
 		if (src_nents) {
 			sg_to_sec4_sg_last(req->src, src_nents,
 					   edesc->sec4_sg, 0);
+			edesc->sec4_sg_dma = dma_map_single(jrdev,
+							    edesc->sec4_sg,
+							    sec4_sg_bytes,
+							    DMA_TO_DEVICE);
+			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+				dev_err(jrdev, "unable to map S/G table\n");
+				return -ENOMEM;
+			}
 			src_dma = edesc->sec4_sg_dma;
 			options = LDST_SGF;
 		} else {
@@ -1404,7 +1485,9 @@
 
 		append_seq_in_ptr(desc, src_dma, to_hash, options);
 
-		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			return ret;
 
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1453,6 +1536,7 @@
 	state->final = ahash_final_no_ctx;
 
 	state->current_buf = 0;
+	state->buf_dma = 0;
 
 	return 0;
 }
@@ -1787,8 +1871,36 @@
 
 static int __init caam_algapi_hash_init(void)
 {
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
 	int i = 0, err = 0;
 
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
 	INIT_LIST_HEAD(&hash_list);
 
 	/* register crypto algorithms the device supports */
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 8c07d31..ae31e55 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -185,7 +185,7 @@
 				      max - copied_idx, false);
 }
 
-static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
 {
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc = ctx->sh_desc;
@@ -203,13 +203,18 @@
 
 	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 					  DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       desc, desc_bytes(desc), 1);
 #endif
+	return 0;
 }
 
-static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
 {
 	struct device *jrdev = ctx->jrdev;
 	struct buf_data *bd = &ctx->bufs[buf_id];
@@ -220,12 +225,17 @@
 			     HDR_REVERSE);
 
 	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, bd->addr)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       desc, desc_bytes(desc), 1);
 #endif
+	return 0;
 }
 
 static void caam_cleanup(struct hwrng *rng)
@@ -242,24 +252,44 @@
 	rng_unmap_ctx(rng_ctx);
 }
 
-static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
 {
 	struct buf_data *bd = &ctx->bufs[buf_id];
+	int err;
 
-	rng_create_job_desc(ctx, buf_id);
+	err = rng_create_job_desc(ctx, buf_id);
+	if (err)
+		return err;
+
 	atomic_set(&bd->empty, BUF_EMPTY);
 	submit_job(ctx, buf_id == ctx->current_buf);
 	wait_for_completion(&bd->filled);
+
+	return 0;
 }
 
-static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
 {
+	int err;
+
 	ctx->jrdev = jrdev;
-	rng_create_sh_desc(ctx);
+
+	err = rng_create_sh_desc(ctx);
+	if (err)
+		return err;
+
 	ctx->current_buf = 0;
 	ctx->cur_buf_idx = 0;
-	caam_init_buf(ctx, 0);
-	caam_init_buf(ctx, 1);
+
+	err = caam_init_buf(ctx, 0);
+	if (err)
+		return err;
+
+	err = caam_init_buf(ctx, 1);
+	if (err)
+		return err;
+
+	return 0;
 }
 
 static struct hwrng caam_rng = {
@@ -278,6 +308,35 @@
 static int __init caam_rng_init(void)
 {
 	struct device *dev;
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
 
 	dev = caam_jr_alloc();
 	if (IS_ERR(dev)) {
@@ -287,7 +346,9 @@
 	rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
 	if (!rng_ctx)
 		return -ENOMEM;
-	caam_init_rng(rng_ctx, dev);
+	err = caam_init_rng(rng_ctx, dev);
+	if (err) {
+		kfree(rng_ctx);
+		return err;
+	}
 
 	dev_info(dev, "registering rng-caam\n");
 	return hwrng_register(&caam_rng);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 1c38f86..3cade79 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,7 @@
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
  */
 
+#include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
@@ -87,6 +88,17 @@
 
 	/* Set the bit to request direct access to DECO0 */
 	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+	if (ctrlpriv->virt_en == 1) {
+		setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
+		while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+		       --timeout)
+			cpu_relax();
+
+		timeout = 100000;
+	}
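+	/* (When virtualized, JR0 must first be granted as the DECO request
+	 * source; DECORSR_VALID confirms the grant before the normal DECO
+	 * request below is made.) */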
+
 	setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
 	while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
@@ -129,6 +141,9 @@
 	*status = rd_reg32(&topregs->deco.op_status_hi) &
 		  DECO_OP_STATUS_HI_ERR_MASK;
 
+	if (ctrlpriv->virt_en == 1)
+		clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
 	/* Mark the DECO as free */
 	clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
@@ -295,9 +310,6 @@
 	/* Unmap controller region */
 	iounmap(&topregs->ctrl);
 
-	kfree(ctrlpriv->jrpdev);
-	kfree(ctrlpriv);
-
 	return ret;
 }
 
@@ -380,9 +392,11 @@
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
-	u64 cha_vid;
+	u32 scfgr, comp_params;
+	u32 cha_vid_ls;
 
-	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+				GFP_KERNEL);
 	if (!ctrlpriv)
 		return -ENOMEM;
 
@@ -413,13 +427,40 @@
 	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
 		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
+	/*
+	 * Read the Compile Time parameters and SCFGR to determine
+	 * if Virtualization is enabled for this platform
+	 */
+	comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
+	scfgr = rd_reg32(&topregs->ctrl.scfgr);
+
+	ctrlpriv->virt_en = 0;
+	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+		 */
+		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+		       (scfgr & SCFGR_VIRT_EN)))
+			ctrlpriv->virt_en = 1;
+	} else {
+		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+		if (comp_params & CTPR_MS_VIRT_EN_POR)
+			ctrlpriv->virt_en = 1;
+	}
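+	/*
+	 * Net effect: virt_en is 1 iff VIRT_EN_POR is set, or VIRT_EN_INCL
+	 * and SCFGR_VIRT_EN are both set -- the inner A || (!A && B) is
+	 * simply A || B.
+	 */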
+
+	if (ctrlpriv->virt_en == 1)
+		setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+			  JRSTART_JR1_START | JRSTART_JR2_START |
+			  JRSTART_JR3_START);
+
 	if (sizeof(dma_addr_t) == sizeof(u64))
 		if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
-			dma_set_mask(dev, DMA_BIT_MASK(40));
+			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 		else
-			dma_set_mask(dev, DMA_BIT_MASK(36));
+			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
 	else
-		dma_set_mask(dev, DMA_BIT_MASK(32));
+		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	/*
 	 * Detect and enable JobRs
@@ -432,8 +473,9 @@
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;
 
-	ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
-								GFP_KERNEL);
+	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+					sizeof(struct platform_device *) * rspec,
+					GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
 		iounmap(&topregs->ctrl);
 		return -ENOMEM;
@@ -456,8 +498,9 @@
 		}
 
 	/* Check to see if QI present. If so, enable */
-	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
-				  CTPR_QI_MASK);
+	ctrlpriv->qi_present =
+			!!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+			   CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present) {
 		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
 		/* This is all that's required to physically enable QI */
@@ -471,13 +514,13 @@
 		return -ENOMEM;
 	}
 
-	cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+	cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
 
 	/*
 	 * If SEC has RNG version >= 4 and the RNG state handle has not
 	 * already been instantiated, do RNG instantiation
 	 */
-	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+	if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
 		ctrlpriv->rng4_sh_init =
 			rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
 		/*
@@ -531,7 +574,8 @@
 
 	/* NOTE: RTIC detection ought to go here, around Si time */
 
-	caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+	caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
+		  (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
 
 	/* Report "alive" for developer to see */
 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -547,7 +591,7 @@
 	 */
 	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
 
-	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
 	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
 
 	/* Controller-level - performance monitor counters */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 7e4500f..d397ff9 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -321,7 +321,6 @@
 /* Continue - Not the last FIFO store to come */
 #define FIFOST_CONT_SHIFT	23
 #define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
 
 /*
  * Extended Length - use 32-bit extended length that
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6d85fcc..97363db 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@
 	u8 total_jobrs;		/* Total Job Rings in device */
 	u8 qi_present;		/* Nonzero if QI present in device */
 	int secvio_irq;		/* Security violation interrupt number */
+	int virt_en;		/* Virtualization enabled in CAAM */
 
 #define	RNG4_MAX_HANDLES 2
 	/* RNG4 block */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b512a4b..4d18e27 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -476,11 +476,11 @@
 
 	if (sizeof(dma_addr_t) == sizeof(u64))
 		if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
-			dma_set_mask(jrdev, DMA_BIT_MASK(40));
+			dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
 		else
-			dma_set_mask(jrdev, DMA_BIT_MASK(36));
+			dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
 	else
-		dma_set_mask(jrdev, DMA_BIT_MASK(32));
+		dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
 
 	/* Identify the interrupt */
 	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index cbde8b9..f48e344 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -84,6 +84,7 @@
 #endif
 
 #ifndef CONFIG_64BIT
+#ifdef __BIG_ENDIAN
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
 	wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
@@ -95,6 +96,21 @@
 	return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
 		((u64)rd_reg32((u32 __iomem *)reg + 1));
 }
+#else
+#ifdef __LITTLE_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+	wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+	wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+	return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+		((u64)rd_reg32((u32 __iomem *)reg));
+}
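+
+/*
+ * Both variants split the 64-bit access into two 32-bit ones; only the
+ * word order differs: the most-significant half sits at the lower
+ * address on big-endian and at the higher address on little-endian.
+ */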
+#endif
+#endif
 #endif
 
 /*
@@ -114,45 +130,45 @@
  */
 
 /* Number of DECOs */
-#define CHA_NUM_DECONUM_SHIFT	56
-#define CHA_NUM_DECONUM_MASK	(0xfull << CHA_NUM_DECONUM_SHIFT)
+#define CHA_NUM_MS_DECONUM_SHIFT	24
+#define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
 /* CHA Version IDs */
-#define CHA_ID_AES_SHIFT	0
-#define CHA_ID_AES_MASK		(0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_LS_AES_SHIFT	0
+#define CHA_ID_LS_AES_MASK		(0xfull << CHA_ID_LS_AES_SHIFT)
 
-#define CHA_ID_DES_SHIFT	4
-#define CHA_ID_DES_MASK		(0xfull << CHA_ID_DES_SHIFT)
+#define CHA_ID_LS_DES_SHIFT	4
+#define CHA_ID_LS_DES_MASK		(0xfull << CHA_ID_LS_DES_SHIFT)
 
-#define CHA_ID_ARC4_SHIFT	8
-#define CHA_ID_ARC4_MASK	(0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_LS_ARC4_SHIFT	8
+#define CHA_ID_LS_ARC4_MASK	(0xfull << CHA_ID_LS_ARC4_SHIFT)
 
-#define CHA_ID_MD_SHIFT		12
-#define CHA_ID_MD_MASK		(0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_LS_MD_SHIFT	12
+#define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
 
-#define CHA_ID_RNG_SHIFT	16
-#define CHA_ID_RNG_MASK		(0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_LS_RNG_SHIFT	16
+#define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
 
-#define CHA_ID_SNW8_SHIFT	20
-#define CHA_ID_SNW8_MASK	(0xfull << CHA_ID_SNW8_SHIFT)
+#define CHA_ID_LS_SNW8_SHIFT	20
+#define CHA_ID_LS_SNW8_MASK	(0xfull << CHA_ID_LS_SNW8_SHIFT)
 
-#define CHA_ID_KAS_SHIFT	24
-#define CHA_ID_KAS_MASK		(0xfull << CHA_ID_KAS_SHIFT)
+#define CHA_ID_LS_KAS_SHIFT	24
+#define CHA_ID_LS_KAS_MASK	(0xfull << CHA_ID_LS_KAS_SHIFT)
 
-#define CHA_ID_PK_SHIFT		28
-#define CHA_ID_PK_MASK		(0xfull << CHA_ID_PK_SHIFT)
+#define CHA_ID_LS_PK_SHIFT	28
+#define CHA_ID_LS_PK_MASK	(0xfull << CHA_ID_LS_PK_SHIFT)
 
-#define CHA_ID_CRC_SHIFT	32
-#define CHA_ID_CRC_MASK		(0xfull << CHA_ID_CRC_SHIFT)
+#define CHA_ID_MS_CRC_SHIFT	0
+#define CHA_ID_MS_CRC_MASK	(0xfull << CHA_ID_MS_CRC_SHIFT)
 
-#define CHA_ID_SNW9_SHIFT	36
-#define CHA_ID_SNW9_MASK	(0xfull << CHA_ID_SNW9_SHIFT)
+#define CHA_ID_MS_SNW9_SHIFT	4
+#define CHA_ID_MS_SNW9_MASK	(0xfull << CHA_ID_MS_SNW9_SHIFT)
 
-#define CHA_ID_DECO_SHIFT	56
-#define CHA_ID_DECO_MASK	(0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_ID_MS_DECO_SHIFT	24
+#define CHA_ID_MS_DECO_MASK	(0xfull << CHA_ID_MS_DECO_SHIFT)
 
-#define CHA_ID_JR_SHIFT		60
-#define CHA_ID_JR_MASK		(0xfull << CHA_ID_JR_SHIFT)
+#define CHA_ID_MS_JR_SHIFT	28
+#define CHA_ID_MS_JR_MASK	(0xfull << CHA_ID_MS_JR_SHIFT)
 
 struct sec_vid {
 	u16 ip_id;
@@ -172,10 +188,14 @@
 	u64 rsvd[13];
 
 	/* CAAM Hardware Instantiation Parameters		fa0-fbf */
-	u64 cha_rev;		/* CRNR - CHA Revision Number		*/
-#define CTPR_QI_SHIFT		57
-#define CTPR_QI_MASK		(0x1ull << CTPR_QI_SHIFT)
-	u64 comp_parms;	/* CTPR - Compile Parameters Register	*/
+	u32 cha_rev_ms;		/* CRNR - CHA Rev No. Most significant half */
+	u32 cha_rev_ls;		/* CRNR - CHA Rev No. Least significant half */
+#define CTPR_MS_QI_SHIFT	25
+#define CTPR_MS_QI_MASK		(0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_VIRT_EN_INCL	0x00000001
+#define CTPR_MS_VIRT_EN_POR	0x00000002
+	u32 comp_parms_ms;	/* CTPR - Compile Parameters Register	*/
+	u32 comp_parms_ls;	/* CTPR - Compile Parameters Register	*/
 	u64 rsvd1[2];
 
 	/* CAAM Global Status					fc0-fdf */
@@ -189,9 +209,12 @@
 	/* Component Instantiation Parameters			fe0-fff */
 	u32 rtic_id;		/* RVID - RTIC Version ID	*/
 	u32 ccb_id;		/* CCBVID - CCB Version ID	*/
-	u64 cha_id;		/* CHAVID - CHA Version ID	*/
-	u64 cha_num;		/* CHANUM - CHA Number		*/
-	u64 caam_id;		/* CAAMVID - CAAM Version ID	*/
+	u32 cha_id_ms;		/* CHAVID - CHA Version ID Most Significant */
+	u32 cha_id_ls;		/* CHAVID - CHA Version ID Least Significant */
+	u32 cha_num_ms;		/* CHANUM - CHA Number Most Significant	*/
+	u32 cha_num_ls;		/* CHANUM - CHA Number Least Significant*/
+	u32 caam_id_ms;		/* CAAMVID - CAAM Version ID MS	*/
+	u32 caam_id_ls;		/* CAAMVID - CAAM Version ID LS	*/
 };
 
 /* LIODN programming for DMA configuration */
@@ -304,9 +327,12 @@
 	/* Bus Access Configuration Section			010-11f */
 	/* Read/Writable                                                */
 	struct masterid jr_mid[4];	/* JRxLIODNR - JobR LIODN setup */
-	u32 rsvd3[12];
+	u32 rsvd3[11];
+	u32 jrstart;			/* JRSTART - Job Ring Start Register */
 	struct masterid rtic_mid[4];	/* RTICxLIODNR - RTIC LIODN setup */
-	u32 rsvd4[7];
+	u32 rsvd4[5];
+	u32 deco_rsr;			/* DECORSR - Deco Request Source */
+	u32 rsvd11;
 	u32 deco_rq;			/* DECORR - DECO Request */
 	struct partid deco_mid[5];	/* DECOxLIODNR - 1 per DECO */
 	u32 rsvd5[22];
@@ -347,7 +373,10 @@
 #define MCFGR_DMA_RESET		0x10000000
 #define MCFGR_LONG_PTR		0x00010000 /* Use >32-bit desc addressing */
 #define SCFGR_RDBENABLE		0x00000400
+#define SCFGR_VIRT_EN		0x00008000
 #define DECORR_RQD0ENABLE	0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0		0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID		0x80000000
 #define DECORR_DEN0		0x00010000 /* DECO0 available for access*/
 
 /* AXI read cache control */
@@ -365,6 +394,12 @@
 #define MCFGR_AXIPRI		0x00000008 /* Assert AXI priority sideband */
 #define MCFGR_BURST_64		0x00000001 /* Max burst size */
 
+/* JRSTART register - Job Ring start bits */
+#define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START       0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START       0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START       0x00000008 /* Start Job ring 3 */
+
 /*
  * caam_job_ring - direct job ring setup
  * 1-4 possible per instantiation, base + 1000/2000/3000/4000
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index d3505a0..7f592d8 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,6 +1,11 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o ccp-ops.o
+ifdef CONFIG_X86
 ccp-objs += ccp-pci.o
+endif
+ifdef CONFIG_ARM64
+ccp-objs += ccp-platform.o
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2c78161..a7d1106 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -20,7 +20,9 @@
 #include <linux/delay.h>
 #include <linux/hw_random.h>
 #include <linux/cpu.h>
+#ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#endif
 #include <linux/ccp.h>
 
 #include "ccp-dev.h"
@@ -360,6 +362,12 @@
 		/* Build queue interrupt mask (two interrupts per queue) */
 		qim |= cmd_q->int_ok | cmd_q->int_err;
 
+#ifdef CONFIG_ARM64
+		/* For arm64 set the recommended queue cache settings */
+		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+			  (CMD_Q_CACHE_INC * i));
+#endif
+
 		dev_dbg(dev, "queue #%u available\n", i);
 	}
 	if (ccp->cmd_q_count == 0) {
@@ -558,12 +566,15 @@
 }
 #endif
 
+#ifdef CONFIG_X86
 static const struct x86_cpu_id ccp_support[] = {
 	{ X86_VENDOR_AMD, 22, },
 };
+#endif
 
 static int __init ccp_mod_init(void)
 {
+#ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
 	int ret;
 
@@ -589,12 +600,30 @@
 
 		break;
 	}
+#endif
+
+#ifdef CONFIG_ARM64
+	int ret;
+
+	ret = ccp_platform_init();
+	if (ret)
+		return ret;
+
+	/* Don't leave the driver loaded if init failed */
+	if (!ccp_get_device()) {
+		ccp_platform_exit();
+		return -ENODEV;
+	}
+
+	return 0;
+#endif
 
 	return -ENODEV;
 }
 
 static void __exit ccp_mod_exit(void)
 {
+#ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
 
 	switch (cpuinfo->x86) {
@@ -602,6 +631,11 @@
 		ccp_pci_exit();
 		break;
 	}
+#endif
+
+#ifdef CONFIG_ARM64
+	ccp_platform_exit();
+#endif
 }
 
 module_init(ccp_mod_init);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7ec536e..62ff35a 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -23,8 +23,6 @@
 #include <linux/hw_random.h>
 
 
-#define IO_OFFSET			0x20000
-
 #define MAX_DMAPOOL_NAME_LEN		32
 
 #define MAX_HW_QUEUES			5
@@ -32,6 +30,9 @@
 
 #define TRNG_RETRIES			10
 
+#define CACHE_NONE			0x00
+#define CACHE_WB_NO_ALLOC		0xb7
+
 
 /****** Register Mappings ******/
 #define Q_MASK_REG			0x000
@@ -50,7 +51,7 @@
 #define CMD_Q_INT_STATUS_BASE		0x214
 #define CMD_Q_STATUS_INCR		0x20
 
-#define CMD_Q_CACHE			0x228
+#define CMD_Q_CACHE_BASE		0x228
 #define CMD_Q_CACHE_INC			0x20
 
 #define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
@@ -194,6 +195,7 @@
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int irq;
 
 	/*
 	 * I/O area used for device communication. The register mapping
@@ -254,12 +256,18 @@
 	/* Suspend support */
 	unsigned int suspending;
 	wait_queue_head_t suspend_queue;
+
+	/* DMA caching attribute support */
+	unsigned int axcache;
 };
 
 
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
+int ccp_platform_init(void);
+void ccp_platform_exit(void);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 int ccp_init(struct ccp_device *ccp);
 void ccp_destroy(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 9ae006d..8729364 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1606,7 +1606,7 @@
 		goto e_ksb;
 
 	ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
-				true);
+				false);
 	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
 			      CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
@@ -1623,10 +1623,10 @@
 		goto e_exp;
 
 	ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
-				true);
+				false);
 	src.address += o_len;	/* Adjust the address for the copy operation */
 	ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
-				true);
+				false);
 	src.address -= o_len;	/* Reset the address to original value */
 
 	/* Prepare the output area for the operation */
@@ -1841,20 +1841,20 @@
 
 	/* Copy the ECC modulus */
 	ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-				CCP_ECC_OPERAND_SIZE, true);
+				CCP_ECC_OPERAND_SIZE, false);
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	/* Copy the first operand */
 	ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
 				ecc->u.mm.operand_1_len,
-				CCP_ECC_OPERAND_SIZE, true);
+				CCP_ECC_OPERAND_SIZE, false);
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
 		/* Copy the second operand */
 		ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
 					ecc->u.mm.operand_2_len,
-					CCP_ECC_OPERAND_SIZE, true);
+					CCP_ECC_OPERAND_SIZE, false);
 		src.address += CCP_ECC_OPERAND_SIZE;
 	}
 
@@ -1960,17 +1960,17 @@
 
 	/* Copy the ECC modulus */
 	ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-				CCP_ECC_OPERAND_SIZE, true);
+				CCP_ECC_OPERAND_SIZE, false);
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	/* Copy the first point X and Y coordinate */
 	ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
 				ecc->u.pm.point_1.x_len,
-				CCP_ECC_OPERAND_SIZE, true);
+				CCP_ECC_OPERAND_SIZE, false);
 	src.address += CCP_ECC_OPERAND_SIZE;
 	ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
 				ecc->u.pm.point_1.y_len,
-				CCP_ECC_OPERAND_SIZE, true);
+				CCP_ECC_OPERAND_SIZE, false);
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	/* Set the first point Z coordinate to 1 */
@@ -1981,11 +1981,11 @@
 		/* Copy the second point X and Y coordinate */
 		ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
 					ecc->u.pm.point_2.x_len,
-					CCP_ECC_OPERAND_SIZE, true);
+					CCP_ECC_OPERAND_SIZE, false);
 		src.address += CCP_ECC_OPERAND_SIZE;
 		ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
 					ecc->u.pm.point_2.y_len,
-					CCP_ECC_OPERAND_SIZE, true);
+					CCP_ECC_OPERAND_SIZE, false);
 		src.address += CCP_ECC_OPERAND_SIZE;
 
 		/* Set the second point Z coordianate to 1 */
@@ -1995,14 +1995,14 @@
 		/* Copy the Domain "a" parameter */
 		ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
 					ecc->u.pm.domain_a_len,
-					CCP_ECC_OPERAND_SIZE, true);
+					CCP_ECC_OPERAND_SIZE, false);
 		src.address += CCP_ECC_OPERAND_SIZE;
 
 		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
 			/* Copy the scalar value */
 			ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
 						ecc->u.pm.scalar_len,
-						CCP_ECC_OPERAND_SIZE, true);
+						CCP_ECC_OPERAND_SIZE, false);
 			src.address += CCP_ECC_OPERAND_SIZE;
 		}
 	}
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 0d74623..180cc87 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -12,8 +12,10 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -24,6 +26,8 @@
 #include "ccp-dev.h"
 
 #define IO_BAR				2
+#define IO_OFFSET			0x20000
+
 #define MSIX_VECTORS			2
 
 struct ccp_msix {
@@ -89,7 +93,8 @@
 	if (ret)
 		return ret;
 
-	ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+	ccp->irq = pdev->irq;
+	ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
 	if (ret) {
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
@@ -136,7 +141,7 @@
 				 dev);
 		pci_disable_msix(pdev);
 	} else {
-		free_irq(pdev->irq, dev);
+		free_irq(ccp->irq, dev);
 		pci_disable_msi(pdev);
 	}
 }
@@ -147,21 +152,12 @@
 	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
 	resource_size_t io_len;
 	unsigned long io_flags;
-	int bar;
 
 	io_flags = pci_resource_flags(pdev, IO_BAR);
 	io_len = pci_resource_len(pdev, IO_BAR);
 	if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
 		return IO_BAR;
 
-	for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
-		io_flags = pci_resource_flags(pdev, bar);
-		io_len = pci_resource_len(pdev, bar);
-		if ((io_flags & IORESOURCE_MEM) &&
-		    (io_len >= (IO_OFFSET + 0x800)))
-			return bar;
-	}
-
 	return -EIO;
 }
 
@@ -214,20 +210,13 @@
 	}
 	ccp->io_regs = ccp->io_map + IO_OFFSET;
 
-	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
-	if (ret == 0) {
-		ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (ret) {
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 		if (ret) {
-			dev_err(dev,
-				"pci_set_consistent_dma_mask failed (%d)\n",
+			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
 				ret);
-			goto e_bar0;
-		}
-	} else {
-		ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-		if (ret) {
-			dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
-			goto e_bar0;
+			goto e_iomap;
 		}
 	}
 
@@ -235,13 +224,13 @@
 
 	ret = ccp_init(ccp);
 	if (ret)
-		goto e_bar0;
+		goto e_iomap;
 
 	dev_notice(dev, "enabled\n");
 
 	return 0;
 
-e_bar0:
+e_iomap:
 	pci_iounmap(pdev, ccp->io_map);
 
 e_device:
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
new file mode 100644
index 0000000..b0a2806
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -0,0 +1,230 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+
+#include "ccp-dev.h"
+
+
+static int ccp_get_irq(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct platform_device *pdev = container_of(dev,
+					struct platform_device, dev);
+	int ret;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		return ret;
+
+	ccp->irq = ret;
+	ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+	if (ret) {
+		dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ccp_get_irqs(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	int ret;
+
+	ret = ccp_get_irq(ccp);
+	if (!ret)
+		return 0;
+
+	/* Couldn't get an interrupt */
+	dev_notice(dev, "could not enable interrupts (%d)\n", ret);
+
+	return ret;
+}
+
+static void ccp_free_irqs(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+
+	free_irq(ccp->irq, dev);
+}
+
+static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct platform_device *pdev = container_of(dev,
+					struct platform_device, dev);
+	struct resource *ior;
+
+	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (ior && (resource_size(ior) >= 0x800))
+		return ior;
+
+	return NULL;
+}
+
+static int ccp_platform_probe(struct platform_device *pdev)
+{
+	struct ccp_device *ccp;
+	struct device *dev = &pdev->dev;
+	struct resource *ior;
+	int ret;
+
+	ret = -ENOMEM;
+	ccp = ccp_alloc_struct(dev);
+	if (!ccp)
+		goto e_err;
+
+	ccp->dev_specific = NULL;
+	ccp->get_irq = ccp_get_irqs;
+	ccp->free_irq = ccp_free_irqs;
+
+	ior = ccp_find_mmio_area(ccp);
+	ccp->io_map = devm_ioremap_resource(dev, ior);
+	if (IS_ERR(ccp->io_map)) {
+		ret = PTR_ERR(ccp->io_map);
+		goto e_free;
+	}
+	ccp->io_regs = ccp->io_map;
+
+	if (!dev->dma_mask)
+		dev->dma_mask = &dev->coherent_dma_mask;
+	*(dev->dma_mask) = DMA_BIT_MASK(48);
+	dev->coherent_dma_mask = DMA_BIT_MASK(48);
+
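+	/* axcache is written verbatim into each queue's CMD_Q_CACHE
+	 * register during ccp_init(): write-back/no-allocate when the
+	 * device is described as dma-coherent, uncached otherwise. */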
+	if (of_property_read_bool(dev->of_node, "dma-coherent"))
+		ccp->axcache = CACHE_WB_NO_ALLOC;
+	else
+		ccp->axcache = CACHE_NONE;
+
+	dev_set_drvdata(dev, ccp);
+
+	ret = ccp_init(ccp);
+	if (ret)
+		goto e_free;
+
+	dev_notice(dev, "enabled\n");
+
+	return 0;
+
+e_free:
+	kfree(ccp);
+
+e_err:
+	dev_notice(dev, "initialization failed\n");
+	return ret;
+}
+
+static int ccp_platform_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp_destroy(ccp);
+
+	kfree(ccp);
+
+	dev_notice(dev, "disabled\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ccp_platform_suspend(struct platform_device *pdev,
+				pm_message_t state)
+{
+	struct device *dev = &pdev->dev;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 1;
+
+	/* Wake all the queue kthreads to prepare for suspend */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		wake_up_process(ccp->cmd_q[i].kthread);
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	/* Wait for all queue kthreads to say they're done */
+	while (!ccp_queues_suspended(ccp))
+		wait_event_interruptible(ccp->suspend_queue,
+					 ccp_queues_suspended(ccp));
+
+	return 0;
+}
+
+static int ccp_platform_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 0;
+
+	/* Wake up all the kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		ccp->cmd_q[i].suspended = 0;
+		wake_up_process(ccp->cmd_q[i].kthread);
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	return 0;
+}
+#endif
+
+static const struct of_device_id ccp_platform_ids[] = {
+	{ .compatible = "amd,ccp-seattle-v1a" },
+	{ },
+};
+
+static struct platform_driver ccp_platform_driver = {
+	.driver = {
+		.name = "AMD Cryptographic Coprocessor",
+		.owner = THIS_MODULE,
+		.of_match_table = ccp_platform_ids,
+	},
+	.probe = ccp_platform_probe,
+	.remove = ccp_platform_remove,
+#ifdef CONFIG_PM
+	.suspend = ccp_platform_suspend,
+	.resume = ccp_platform_resume,
+#endif
+};
+
+int ccp_platform_init(void)
+{
+	return platform_driver_register(&ccp_platform_driver);
+}
+
+void ccp_platform_exit(void)
+{
+	platform_driver_unregister(&ccp_platform_driver);
+}
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 502edf0..544f6d3 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1247,7 +1247,7 @@
 static struct vio_driver nx842_driver = {
 	.name = MODULE_NAME,
 	.probe = nx842_probe,
-	.remove = nx842_remove,
+	.remove = __exit_p(nx842_remove),
 	.get_desired_dma = nx842_get_desired_dma,
 	.id_table = nx842_driver_ids,
 };
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
new file mode 100644
index 0000000..49bede2
--- /dev/null
+++ b/drivers/crypto/qat/Kconfig
@@ -0,0 +1,23 @@
+config CRYPTO_DEV_QAT
+	tristate
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select FW_LOADER
+
+config CRYPTO_DEV_QAT_DH895xCC
+	tristate "Support for Intel(R) DH895xCC"
+	depends on X86 && PCI
+	default n
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xcc.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
new file mode 100644
index 0000000..d11481b
--- /dev/null
+++ b/drivers/crypto/qat/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
new file mode 100644
index 0000000..e0424dc
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+	adf_ctl_drv.o \
+	adf_dev_mgr.o \
+	adf_init.o \
+	adf_accel_engine.o \
+	adf_aer.o \
+	adf_transport.o \
+	qat_crypto.o \
+	qat_algs.o \
+	qat_uclo.o \
+	qat_hal.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
new file mode 100644
index 0000000..9282381
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -0,0 +1,205 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include "adf_cfg_common.h"
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+	ADF_ACCEL_CAPABILITIES_NULL = 0,
+	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+	resource_size_t base_addr;
+	void __iomem *virt_addr;
+	resource_size_t size;
+} __packed;
+
+struct adf_accel_msix {
+	struct msix_entry *entries;
+	char **names;
+} __packed;
+
+struct adf_accel_pci {
+	struct pci_dev *pci_dev;
+	struct adf_accel_msix msix_entries;
+	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+	uint8_t revid;
+	uint8_t sku;
+} __packed;
+
+enum dev_state {
+	DEV_DOWN = 0,
+	DEV_UP
+};
+
+enum dev_sku_info {
+	DEV_SKU_1 = 0,
+	DEV_SKU_2,
+	DEV_SKU_3,
+	DEV_SKU_4,
+	DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+	switch (info) {
+	case DEV_SKU_1:
+		return "SKU1";
+	case DEV_SKU_2:
+		return "SKU2";
+	case DEV_SKU_3:
+		return "SKU3";
+	case DEV_SKU_4:
+		return "SKU4";
+	case DEV_SKU_UNKNOWN:
+	default:
+		break;
+	}
+	return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+	const char *name;
+	const enum adf_device_type type;
+	uint32_t instances;
+} __packed;
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_hw_device_data {
+	struct adf_hw_device_class *dev_class;
+	uint32_t (*get_accel_mask)(uint32_t fuse);
+	uint32_t (*get_ae_mask)(uint32_t fuse);
+	uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
+	uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+	void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
+	void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
+	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+	void (*free_irq)(struct adf_accel_dev *accel_dev);
+	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+	const char *fw_name;
+	uint32_t pci_dev_id;
+	uint32_t fuses;
+	uint32_t accel_capabilities_mask;
+	uint16_t accel_mask;
+	uint16_t ae_mask;
+	uint16_t tx_rings_mask;
+	uint8_t tx_rx_gap;
+	uint8_t instance_id;
+	uint8_t num_banks;
+	uint8_t num_accel;
+	uint8_t num_logical_accel;
+	uint8_t num_engines;
+} __packed;
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+	__raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) ((accel_dev)->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+	struct icp_qat_fw_loader_handle *fw_loader;
+	const struct firmware *uof_fw;
+};
+
+struct adf_accel_dev {
+	struct adf_etr_data *transport;
+	struct adf_hw_device_data *hw_device;
+	struct adf_cfg_device_data *cfg;
+	struct adf_fw_loader_data *fw_loader;
+	struct adf_admin_comms *admin;
+	struct list_head crypto_list;
+	unsigned long status;
+	atomic_t ref_count;
+	struct dentry *debugfs_dir;
+	struct list_head list;
+	struct module *owner;
+	uint8_t accel_id;
+	uint8_t numa_node;
+	struct adf_accel_pci accel_pci_dev;
+} __packed;
+#endif
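
For illustration, a device-specific module would populate the adf_hw_device_data
callbacks above and reach ring CSRs through the ADF_CSR_* macros. The sketch
below is hypothetical (the example_* names are not part of the patch) and
assumes the ETR BAR has already been mapped:

	/* Hypothetical sketch of an adf_hw_device_data user. */
	static uint32_t example_get_num_accels(struct adf_hw_device_data *self)
	{
		/* Count enabled accelerators in the fuse-derived mask. */
		if (!self)
			return 0;
		return hweight16(self->accel_mask);
	}

	static void example_etr_csr_write(struct adf_accel_dev *accel_dev,
					  uint32_t offset, uint32_t val)
	{
		struct adf_bar *etr = &GET_BARS(accel_dev)[ADF_DH895XCC_ETR_BAR];

		/* ADF_CSR_WR() expands to __raw_writel(); the offset is an
		 * illustrative assumption, not a documented register. */
		ADF_CSR_WR(etr->virt_addr, offset, val);
	}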
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
new file mode 100644
index 0000000..c77453b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -0,0 +1,168 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	void *uof_addr;
+	uint32_t uof_size;
+
+	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
+		return -EFAULT;
+	}
+
+	uof_size = loader_data->uof_fw->size;
+	uof_addr = (void *)loader_data->uof_fw->data;
+	if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+		pr_err("QAT: Failed to map UOF\n");
+		goto out_err;
+	}
+	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+		pr_err("QAT: Failed to map UOF\n");
+		goto out_err;
+	}
+	return 0;
+
+out_err:
+	release_firmware(loader_data->uof_fw);
+	return -EFAULT;
+}
+
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+	release_firmware(loader_data->uof_fw);
+	qat_uclo_del_uof_obj(loader_data->fw_loader);
+	qat_hal_deinit(loader_data->fw_loader);
+	loader_data->fw_loader = NULL;
+	return 0;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+		if (hw_data->ae_mask & (1 << ae)) {
+			qat_hal_start(loader_data->fw_loader, ae, 0xFF);
+			ae_ctr++;
+		}
+	}
+	pr_info("QAT: qat_dev%d started %d acceleration engines\n",
+		accel_dev->accel_id, ae_ctr);
+	return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+		if (hw_data->ae_mask & (1 << ae)) {
+			qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+			ae_ctr++;
+		}
+	}
+	pr_info("QAT: qat_dev%d stopped %d acceleration engines\n",
+		accel_dev->accel_id, ae_ctr);
+	return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+	qat_hal_reset(loader_data->fw_loader);
+	if (qat_hal_clr_reset(loader_data->fw_loader))
+		return -EFAULT;
+
+	return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data;
+
+	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+	if (!loader_data)
+		return -ENOMEM;
+
+	accel_dev->fw_loader = loader_data;
+	if (qat_hal_init(accel_dev)) {
+		pr_err("QAT: Failed to init the AEs\n");
+		kfree(loader_data);
+		return -EFAULT;
+	}
+	if (adf_ae_reset(accel_dev, 0)) {
+		pr_err("QAT: Failed to reset the AEs\n");
+		qat_hal_deinit(loader_data->fw_loader);
+		kfree(loader_data);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+	kfree(accel_dev->fw_loader);
+	accel_dev->fw_loader = NULL;
+	return 0;
+}
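
The six adf_ae_* entry points pair up as init/shutdown, fw_load/fw_release and
start/stop; the actual call ordering lives in adf_init.c, which the Makefile
above also builds. A minimal sketch of the implied bring-up order, with
hypothetical example_ naming:

	/* Hypothetical bring-up order for the AE helpers above; error
	 * unwinding mirrors the pairing of the calls. */
	static int example_ae_bring_up(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = adf_ae_init(accel_dev);		/* alloc, HAL init, reset */
		if (ret)
			return ret;
		ret = adf_ae_fw_load(accel_dev);	/* request and map the UOF */
		if (ret) {
			adf_ae_shutdown(accel_dev);
			return ret;
		}
		return adf_ae_start(accel_dev);		/* kick each masked engine */
	}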
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
new file mode 100644
index 0000000..c29d4c3
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -0,0 +1,259 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+					   pci_channel_state_t state)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	pr_info("QAT: Acceleration driver hardware error detected.\n");
+	if (!accel_dev) {
+		pr_err("QAT: Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (state == pci_channel_io_perm_failure) {
+		pr_err("QAT: Can't recover from device error\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+	int mode;
+	struct adf_accel_dev *accel_dev;
+	struct completion compl;
+	struct work_struct reset_work;
+};
+
+#define PPDSTAT_OFFSET 0x7E
+static void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	struct pci_dev *parent = pdev->bus->self;
+	uint16_t ppdstat = 0, bridge_ctl = 0;
+	int pending = 0;
+
+	pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+	pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+	pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+	if (pending) {
+		int ctr = 0;
+
+		do {
+			msleep(100);
+			pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+			pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+		} while (pending && ctr++ < 10);
+	}
+
+	if (pending)
+		pr_info("QAT: Transaction still in progress. Proceeding\n");
+
+	pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+	bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+	bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+	struct adf_reset_dev_data *reset_data =
+		  container_of(work, struct adf_reset_dev_data, reset_work);
+	struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+	adf_dev_restarting_notify(accel_dev);
+	adf_dev_stop(accel_dev);
+	adf_dev_restore(accel_dev);
+	if (adf_dev_start(accel_dev)) {
+		/* The device hung and we can't restart it, so stop here */
+		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+		kfree(reset_data);
+		WARN(1, "QAT: device restart failed. Device is unusable\n");
+		return;
+	}
+	adf_dev_restarted_notify(accel_dev);
+	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+	/* The dev is back alive. Notify the caller if in sync mode */
+	if (reset_data->mode == ADF_DEV_RESET_SYNC)
+		complete(&reset_data->compl);
+	else
+		kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+				      enum adf_dev_reset_mode mode)
+{
+	struct adf_reset_dev_data *reset_data;
+
+	if (adf_dev_started(accel_dev) &&
+	    !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		return 0;
+
+	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+	reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+	if (!reset_data)
+		return -ENOMEM;
+	reset_data->accel_dev = accel_dev;
+	init_completion(&reset_data->compl);
+	reset_data->mode = mode;
+	INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+	queue_work(device_reset_wq, &reset_data->reset_work);
+
+	/* If in sync mode wait for the result */
+	if (mode == ADF_DEV_RESET_SYNC) {
+		int ret = 0;
+		/* Maximum device reset time is 10 seconds */
+		unsigned long wait_jiffies = msecs_to_jiffies(10000);
+		unsigned long timeout = wait_for_completion_timeout(
+				   &reset_data->compl, wait_jiffies);
+		if (!timeout) {
+			pr_err("QAT: Reset device timeout expired\n");
+			ret = -EFAULT;
+		}
+		kfree(reset_data);
+		return ret;
+	}
+	return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+	pr_info("QAT: Acceleration driver reset completed\n");
+	pr_info("QAT: Device is up and runnig\n");
+}
+
+static struct pci_error_handlers adf_err_handler = {
+	.error_detected = adf_error_detected,
+	.slot_reset = adf_slot_reset,
+	.resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ * @adf:        PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	adf->err_handler = &adf_err_handler;
+	pci_enable_pcie_error_reporting(pdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
+
+int adf_init_aer(void)
+{
+	device_reset_wq = create_workqueue("qat_device_reset_wq");
+	return (device_reset_wq == NULL) ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+	if (device_reset_wq)
+		destroy_workqueue(device_reset_wq);
+	device_reset_wq = NULL;
+}
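
A device-specific pci_driver is expected to call adf_enable_aer() at probe time
and adf_disable_aer() on remove; recovery then flows error_detected ->
slot_reset -> resume. A minimal, hypothetical caller (example_qat_driver stands
in for the real device-specific driver):

	static struct pci_driver example_qat_driver;

	static int example_setup_aer(struct adf_accel_dev *accel_dev)
	{
		/* Installs adf_err_handler on the driver and turns on
		 * PCIe error reporting for the device. */
		return adf_enable_aer(accel_dev, &example_qat_driver);
	}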
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
new file mode 100644
index 0000000..aba7f1d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -0,0 +1,361 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+	mutex_lock(&qat_cfg_read_lock);
+	return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+	struct list_head *list;
+	struct adf_cfg_section *sec =
+				list_entry(v, struct adf_cfg_section, list);
+
+	seq_printf(sfile, "[%s]\n", sec->name);
+	list_for_each(list, &sec->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+	}
+	return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+	return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+	.start = qat_dev_cfg_start,
+	.next = qat_dev_cfg_next,
+	.stop = qat_dev_cfg_stop,
+	.show = qat_dev_cfg_show
+};
+
+static int qat_dev_cfg_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &qat_dev_cfg_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations qat_dev_cfg_fops = {
+	.open = qat_dev_cfg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data;
+
+	dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+	if (!dev_cfg_data)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+	init_rwsem(&dev_cfg_data->lock);
+	accel_dev->cfg = dev_cfg_data;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+						  accel_dev->debugfs_dir,
+						  dev_cfg_data,
+						  &qat_dev_cfg_fops);
+	if (!dev_cfg_data->debug) {
+		pr_err("QAT: Failed to create qat cfg debugfs entry.\n");
+		kfree(dev_cfg_data);
+		accel_dev->cfg = NULL;
+		return -EFAULT;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clear the acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes the configuration table from the given acceleration
+ * device and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+	debugfs_remove(dev_cfg_data->debug);
+	kfree(dev_cfg_data);
+	accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+			       struct adf_cfg_section *sec)
+{
+	list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+	struct list_head *list_ptr, *tmp;
+
+	list_for_each_prev_safe(list_ptr, tmp, head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list_ptr, struct adf_cfg_key_val, list);
+		list_del(list_ptr);
+		kfree(ptr);
+	}
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+	struct adf_cfg_section *ptr;
+	struct list_head *list, *tmp;
+
+	list_for_each_prev_safe(list, tmp, head) {
+		ptr = list_entry(list, struct adf_cfg_section, list);
+		adf_cfg_keyval_del_all(&ptr->param_head);
+		list_del(list);
+		kfree(ptr);
+	}
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+						      const char *key)
+{
+	struct list_head *list;
+
+	list_for_each(list, &s->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		if (!strcmp(ptr->key, key))
+			return ptr;
+	}
+	return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+						const char *sec_name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct list_head *list;
+
+	list_for_each(list, &cfg->sec_list) {
+		struct adf_cfg_section *ptr =
+			list_entry(list, struct adf_cfg_section, list);
+		if (!strcmp(ptr->name, sec_name))
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+			       const char *sec_name,
+			       const char *key_name,
+			       char *val)
+{
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+	struct adf_cfg_key_val *keyval = NULL;
+
+	if (sec)
+		keyval = adf_cfg_key_value_find(sec, key_name);
+	if (keyval) {
+		memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+		return 0;
+	}
+	return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds a configuration key-value entry in the appropriate section
+ * of the given acceleration device's table.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+				const char *section_name,
+				const char *key, const void *val,
+				enum adf_cfg_val_type type)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct adf_cfg_key_val *key_val;
+	struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+							   section_name);
+	if (!section)
+		return -EFAULT;
+
+	key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+	if (!key_val)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&key_val->list);
+	strlcpy(key_val->key, key, sizeof(key_val->key));
+
+	if (type == ADF_DEC) {
+		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+			 "%ld", (*((long *)val)));
+	} else if (type == ADF_STR) {
+		strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+	} else if (type == ADF_HEX) {
+		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+			 "0x%lx", (unsigned long)val);
+	} else {
+		pr_err("QAT: Unknown type given.\n");
+		kfree(key_val);
+		return -1;
+	}
+	key_val->type = type;
+	down_write(&cfg->lock);
+	adf_cfg_keyval_add(key_val, section);
+	up_write(&cfg->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds configuration section where key - value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+	if (sec)
+		return 0;
+
+	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+
+	strlcpy(sec->name, name, sizeof(sec->name));
+	INIT_LIST_HEAD(&sec->param_head);
+	down_write(&cfg->lock);
+	list_add_tail(&sec->list, &cfg->sec_list);
+	up_write(&cfg->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+			    const char *section, const char *name,
+			    char *value)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	int ret;
+
+	down_read(&cfg->lock);
+	ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+	up_read(&cfg->lock);
+	return ret;
+}
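
Taken together, a driver builds its table once at init; the result is then
readable through the "dev_cfg" debugfs file in the "[section]" / "key = value"
layout emitted by qat_dev_cfg_show(). A hypothetical sequence, assuming
accel_dev->debugfs_dir is already populated as adf_cfg_dev_add() requires:

	static int example_build_cfg(struct adf_accel_dev *accel_dev)
	{
		long num_cy = 1;
		int ret;

		ret = adf_cfg_dev_add(accel_dev);
		if (ret)
			return ret;
		ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
		if (ret)
			return ret;
		/* ADF_DEC values are dereferenced as a long and formatted
		 * with "%ld", so pass a pointer to the value. */
		return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						   ADF_NUM_CY, &num_cy,
						   ADF_DEC);
	}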
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
new file mode 100644
index 0000000..6a9c6f6b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -0,0 +1,87 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	enum adf_cfg_val_type type;
+	struct list_head list;
+};
+
+struct adf_cfg_section {
+	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+	struct list_head list;
+	struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+	struct list_head sec_list;
+	struct dentry *debug;
+	struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+				const char *section_name,
+				const char *key, const void *val,
+				enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+			    const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
new file mode 100644
index 0000000..88b8218
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -0,0 +1,100 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES 32
+
+enum adf_cfg_val_type {
+	ADF_DEC,
+	ADF_HEX,
+	ADF_STR
+};
+
+enum adf_device_type {
+	DEV_UNKNOWN = 0,
+	DEV_DH895XCC,
+};
+
+struct adf_dev_status_info {
+	enum adf_device_type type;
+	uint8_t accel_id;
+	uint8_t instance_id;
+	uint8_t num_ae;
+	uint8_t num_accel;
+	uint8_t num_logical_accel;
+	uint8_t banks_per_accel;
+	uint8_t state;
+	uint8_t bus;
+	uint8_t dev;
+	uint8_t fun;
+	char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
+#endif
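
These ioctls are issued from user space against the control device that
adf_ctl_drv.c (below) registers as DEVICE_NAME "qat_adf_ctl". A hypothetical
caller; the /dev path assumes the usual udev naming, and struct
adf_user_cfg_ctl_data comes from adf_cfg_user.h:

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	static int example_start_device(uint8_t dev_id)
	{
		struct adf_user_cfg_ctl_data ctl = { .device_id = dev_id };
		int fd = open("/dev/qat_adf_ctl", O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, IOCTL_START_ACCEL_DEV, &ctl);
		close(fd);
		return ret;
	}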
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
new file mode 100644
index 0000000..c7ac758
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_RND_TX "RingNrbgTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_RND_RX "RingNrbgRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_BANK_NUM "BankNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+	ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+	ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+	ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+	ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
+#endif
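
The *_FORMAT macros rely on string-literal pasting, so for example
ADF_ETRMGR_COALESCE_TIMER_FORMAT expands to "Bank%dInterruptCoalescingTimerNs".
A small, illustrative sketch of building a per-bank key:

	static void example_bank_key(char *key, size_t len, int bank)
	{
		/* For bank 0 this yields "Bank0InterruptCoalescingTimerNs". */
		snprintf(key, len, ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank);
	}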
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
new file mode 100644
index 0000000..0c38a15
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -0,0 +1,94 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	union {
+		char *user_val_ptr;
+		uint64_t padding1;
+	};
+	union {
+		struct adf_user_cfg_key_val *prev;
+		uint64_t padding2;
+	};
+	union {
+		struct adf_user_cfg_key_val *next;
+		uint64_t padding3;
+	};
+	enum adf_cfg_val_type type;
+};
+
+struct adf_user_cfg_section {
+	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+	union {
+		struct adf_user_cfg_key_val *params;
+		uint64_t padding1;
+	};
+	union {
+		struct adf_user_cfg_section *prev;
+		uint64_t padding2;
+	};
+	union {
+		struct adf_user_cfg_section *next;
+		uint64_t padding3;
+	};
+};
+
+struct adf_user_cfg_ctl_data {
+	union {
+		struct adf_user_cfg_section *config_section;
+		uint64_t padding;
+	};
+	uint8_t device_id;
+};
+#endif
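
The pointer/uint64_t unions keep these structures the same size for 32- and
64-bit user space. A hypothetical user-space snippet chaining one section with
one parameter, in the shape that adf_copy_key_value_data() in adf_ctl_drv.c
(below) walks:

	static struct adf_user_cfg_key_val example_kv = {
		.key = ADF_NUM_CY,
		.val = "1",
		.type = ADF_STR,
	};

	static struct adf_user_cfg_section example_sec = {
		.name = ADF_KERNEL_SEC,
	};

	static void example_link(struct adf_user_cfg_ctl_data *ctl)
	{
		example_sec.params = &example_kv;	/* single-entry list */
		ctl->config_section = &example_sec;	/* NULL next ends walk */
	}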
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
new file mode 100644
index 0000000..5e8f9d4
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -0,0 +1,192 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+	ADF_DEV_RESET_ASYNC = 0,
+	ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+	ADF_EVENT_INIT = 0,
+	ADF_EVENT_START,
+	ADF_EVENT_STOP,
+	ADF_EVENT_SHUTDOWN,
+	ADF_EVENT_RESTARTING,
+	ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+	int (*event_hld)(struct adf_accel_dev *accel_dev,
+			 enum adf_event event);
+	unsigned long init_status;
+	unsigned long start_status;
+	char *name;
+	struct list_head list;
+	int admin;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_init(struct adf_accel_dev *accel_dev);
+int adf_dev_start(struct adf_accel_dev *accel_dev);
+int adf_dev_stop(struct adf_accel_dev *accel_dev);
+int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(uint32_t id);
+void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+void adf_disable_aer(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+int qat_algs_init(void);
+void qat_algs_exit(void);
+int qat_algs_register(void);
+int qat_algs_unregister(void);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		   unsigned int ctx_mask);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		  unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+			  unsigned char ae, unsigned int ctx_mask);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
+			   unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+			    unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+		       unsigned char ae, unsigned int uaddr,
+		       unsigned int words_num, uint64_t *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		     unsigned int uword_addr, unsigned int words_num,
+		     unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+			unsigned char ae,
+			struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned char ctx_mask,
+		     enum icp_qat_uof_regtype reg_type,
+		     unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned char ctx_mask,
+		    unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+		  unsigned char ae, unsigned short lm_addr, unsigned int value);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+			 void *addr_ptr, int mem_size);
+#endif
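
Subsystems such as the crypto service attach to the device lifecycle through
struct service_hndl; the handler is called back with the adf_event values
declared above. A minimal, hypothetical registration:

	static int example_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
	{
		switch (event) {
		case ADF_EVENT_START:
			/* bring the service up for this accel_dev */
			return 0;
		case ADF_EVENT_STOP:
			/* quiesce the service */
			return 0;
		default:
			return 0;
		}
	}

	static struct service_hndl example_service = {
		.event_hld = example_event_handler,
		.name = "example_service",
	};

	/* at module init: adf_service_register(&example_service); */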
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
new file mode 100644
index 0000000..d97069b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -0,0 +1,490 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = adf_ctl_ioctl,
+	.compat_ioctl = adf_ctl_ioctl,
+};
+
+struct adf_ctl_drv_info {
+	unsigned int major;
+	struct cdev drv_cdev;
+	struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adt_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+	device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0));
+	cdev_del(&adt_ctl_drv.drv_cdev);
+	class_destroy(adt_ctl_drv.drv_class);
+	unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+	dev_t dev_id;
+	struct device *drv_device;
+
+	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+		pr_err("QAT: unable to allocate chrdev region\n");
+		return -EFAULT;
+	}
+
+	adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(adt_ctl_drv.drv_class)) {
+		pr_err("QAT: class_create failed for adf_ctl\n");
+		goto err_chrdev_unreg;
+	}
+	adt_ctl_drv.major = MAJOR(dev_id);
+	cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops);
+	if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) {
+		pr_err("QAT: cdev add failed\n");
+		goto err_class_destr;
+	}
+
+	drv_device = device_create(adt_ctl_drv.drv_class, NULL,
+				   MKDEV(adt_ctl_drv.major, 0),
+				   NULL, DEVICE_NAME);
+	if (IS_ERR(drv_device)) {
+		pr_err("QAT: failed to create device\n");
+		goto err_cdev_del;
+	}
+	return 0;
+err_cdev_del:
+	cdev_del(&adt_ctl_drv.drv_cdev);
+err_class_destr:
+	class_destroy(adt_ctl_drv.drv_class);
+err_chrdev_unreg:
+	unregister_chrdev_region(dev_id, 1);
+	return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+				   unsigned long arg)
+{
+	struct adf_user_cfg_ctl_data *cfg_data;
+
+	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+	if (!cfg_data)
+		return -ENOMEM;
+
+	/* Initialize device id to NO DEVICE as 0 is a valid device id */
+	cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+		pr_err("QAT: failed to copy from user cfg_data.\n");
+		kfree(cfg_data);
+		return -EIO;
+	}
+
+	*ctl_data = cfg_data;
+	return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+				  const char *section,
+				  const struct adf_user_cfg_key_val *key_val)
+{
+	if (key_val->type == ADF_HEX) {
+		long *ptr = (long *)key_val->val;
+		long val = *ptr;
+
+		if (adf_cfg_add_key_value_param(accel_dev, section,
+						key_val->key, (void *)val,
+						key_val->type)) {
+			pr_err("QAT: failed to add keyvalue.\n");
+			return -EFAULT;
+		}
+	} else {
+		if (adf_cfg_add_key_value_param(accel_dev, section,
+						key_val->key, key_val->val,
+						key_val->type)) {
+			pr_err("QAT: failed to add keyvalue.\n");
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+				   struct adf_user_cfg_ctl_data *ctl_data)
+{
+	struct adf_user_cfg_key_val key_val;
+	struct adf_user_cfg_key_val *params_head;
+	struct adf_user_cfg_section section, *section_head;
+
+	section_head = ctl_data->config_section;
+
+	while (section_head) {
+		if (copy_from_user(&section, (void __user *)section_head,
+				   sizeof(*section_head))) {
+			pr_err("QAT: failed to copy section info\n");
+			goto out_err;
+		}
+
+		if (adf_cfg_section_add(accel_dev, section.name)) {
+			pr_err("QAT: failed to add section.\n");
+			goto out_err;
+		}
+
+		params_head = section.params;
+
+		while (params_head) {
+			if (copy_from_user(&key_val, (void __user *)params_head,
+					   sizeof(key_val))) {
+				pr_err("QAT: Failed to copy keyvalue.\n");
+				goto out_err;
+			}
+			if (adf_add_key_value_data(accel_dev, section.name,
+						   &key_val)) {
+				goto out_err;
+			}
+			params_head = key_val.next;
+		}
+		section_head = section.next;
+	}
+	return 0;
+out_err:
+	adf_cfg_del_all(accel_dev);
+	return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+				    unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+	struct adf_accel_dev *accel_dev;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+	if (!accel_dev) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (adf_dev_started(accel_dev)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+	struct list_head *itr, *head = adf_devmgr_get_head();
+
+	list_for_each(itr, head) {
+		struct adf_accel_dev *dev =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+				pr_info("QAT: device qat_dev%d is busy\n",
+					dev->accel_id);
+				return -EBUSY;
+			}
+		}
+	}
+	return 0;
+}
+
+static int adf_ctl_stop_devices(uint32_t id)
+{
+	struct list_head *itr, *head = adf_devmgr_get_head();
+	int ret = 0;
+
+	list_for_each(itr, head) {
+		struct adf_accel_dev *accel_dev =
+				list_entry(itr, struct adf_accel_dev, list);
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			if (adf_dev_stop(accel_dev)) {
+				pr_err("QAT: Failed to stop qat_dev%d\n", id);
+				ret = -EFAULT;
+			}
+		}
+	}
+	return ret;
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+				  unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	if (adf_devmgr_verify_id(ctl_data->device_id)) {
+		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+	if (ret)
+		goto out;
+
+	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+		pr_info("QAT: Stopping all acceleration devices.\n");
+	else
+		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+			ctl_data->device_id);
+
+	ret = adf_ctl_stop_devices(ctl_data->device_id);
+	if (ret)
+		pr_err("QAT: failed to stop device.\n");
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+				   unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+	struct adf_accel_dev *accel_dev;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+	if (!accel_dev) {
+		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (!adf_dev_started(accel_dev)) {
+		pr_info("QAT: Starting acceleration device qat_dev%d.\n",
+			ctl_data->device_id);
+		ret = adf_dev_start(accel_dev);
+	} else {
+		pr_info("QAT: Acceleration device qat_dev%d already started.\n",
+			ctl_data->device_id);
+	}
+	if (ret) {
+		pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
+		adf_dev_stop(accel_dev);
+	}
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+					 unsigned long arg)
+{
+	uint32_t num_devices = 0;
+
+	adf_devmgr_get_num_dev(&num_devices);
+	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+				    unsigned long arg)
+{
+	struct adf_hw_device_data *hw_data;
+	struct adf_dev_status_info dev_info;
+	struct adf_accel_dev *accel_dev;
+
+	if (copy_from_user(&dev_info, (void __user *)arg,
+			   sizeof(struct adf_dev_status_info))) {
+		pr_err("QAT: failed to copy from user.\n");
+		return -EFAULT;
+	}
+
+	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+	if (!accel_dev) {
+		pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+		return -ENODEV;
+	}
+	hw_data = accel_dev->hw_device;
+	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+	dev_info.num_ae = hw_data->get_num_aes(hw_data);
+	dev_info.num_accel = hw_data->get_num_accels(hw_data);
+	dev_info.num_logical_accel = hw_data->num_logical_accel;
+	dev_info.banks_per_accel = hw_data->num_banks
+					/ hw_data->num_logical_accel;
+	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+	dev_info.instance_id = hw_data->instance_id;
+	dev_info.type = hw_data->dev_class->type;
+	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+	if (copy_to_user((void __user *)arg, &dev_info,
+			 sizeof(struct adf_dev_status_info))) {
+		pr_err("QAT: failed to copy status.\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&adf_ctl_lock))
+		return -EFAULT;
+
+	switch (cmd) {
+	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+		break;
+
+	case IOCTL_STOP_ACCEL_DEV:
+		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+		break;
+
+	case IOCTL_START_ACCEL_DEV:
+		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+		break;
+
+	case IOCTL_GET_NUM_DEVICES:
+		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+		break;
+
+	case IOCTL_STATUS_ACCEL_DEV:
+		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+		break;
+	default:
+		pr_err("QAT: Invalid ioclt\n");
+		ret = -EFAULT;
+		break;
+	}
+	mutex_unlock(&adf_ctl_lock);
+	return ret;
+}
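+
+/*
+ * Illustrative user-space use of this interface (editor's sketch, not part
+ * of this patch; assumes udev has created a node for the DEVICE_NAME char
+ * device):
+ *
+ *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
+ *	uint32_t num_devices = 0;
+ *
+ *	if (fd >= 0 && !ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices))
+ *		printf("%u QAT devices\n", num_devices);
+ */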
+
+static int __init adf_register_ctl_device_driver(void)
+{
+	mutex_init(&adf_ctl_lock);
+
+	if (qat_algs_init())
+		goto err_algs_init;
+
+	if (adf_chr_drv_create())
+		goto err_chr_dev;
+
+	if (adf_init_aer())
+		goto err_aer;
+
+	if (qat_crypto_register())
+		goto err_crypto_register;
+
+	return 0;
+
+err_crypto_register:
+	adf_exit_aer();
+err_aer:
+	adf_chr_drv_destroy();
+err_chr_dev:
+	qat_algs_exit();
+err_algs_init:
+	mutex_destroy(&adf_ctl_lock);
+	return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+	adf_chr_drv_destroy();
+	adf_exit_aer();
+	qat_crypto_unregister();
+	qat_algs_exit();
+	mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
new file mode 100644
index 0000000..ae71555
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -0,0 +1,215 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static DEFINE_MUTEX(table_lock);
+static uint32_t num_devices;
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function adds the acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+{
+	struct list_head *itr;
+
+	if (num_devices == ADF_MAX_DEVICES) {
+		pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES);
+		return -EFAULT;
+	}
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr == accel_dev) {
+			mutex_unlock(&table_lock);
+			return -EEXIST;
+		}
+	}
+	atomic_set(&accel_dev->ref_count, 0);
+	list_add_tail(&accel_dev->list, &accel_table);
+	accel_dev->accel_id = num_devices++;
+	mutex_unlock(&table_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
+
+struct list_head *adf_devmgr_get_head(void)
+{
+	return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes the acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+{
+	mutex_lock(&table_lock);
+	list_del(&accel_dev->list);
+	num_devices--;
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+	struct adf_accel_dev *dev = NULL;
+
+	if (!list_empty(&accel_table))
+		dev = list_first_entry(&accel_table, struct adf_accel_dev,
+				       list);
+	return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to pci device.
+ *
+ * Function returns the acceleration device associated with the given
+ * pci device. To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+	struct list_head *itr;
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+			mutex_unlock(&table_lock);
+			return ptr;
+		}
+	}
+	mutex_unlock(&table_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+{
+	struct list_head *itr;
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->accel_id == id) {
+			mutex_unlock(&table_lock);
+			return ptr;
+		}
+	}
+	mutex_unlock(&table_lock);
+	return NULL;
+}
+
+int adf_devmgr_verify_id(uint32_t id)
+{
+	if (id == ADF_CFG_ALL_DEVICES)
+		return 0;
+
+	if (adf_devmgr_get_dev_by_id(id))
+		return 0;
+
+	return -ENODEV;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+	struct list_head *itr;
+
+	*num = 0;
+	list_for_each(itr, &accel_table) {
+		(*num)++;
+	}
+}
+
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+	return atomic_read(&accel_dev->ref_count) != 0;
+}
+
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+		if (!try_module_get(accel_dev->owner))
+			return -EFAULT;
+	return 0;
+}
+
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+		module_put(accel_dev->owner);
+}
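+
+/*
+ * Typical get/put pairing around a period of device use (editor's sketch,
+ * not part of this patch). The first reference pins the owning module,
+ * the last one releases it:
+ *
+ *	if (adf_dev_get(accel_dev))
+ *		return -ENODEV;
+ *	...use accel_dev...
+ *	adf_dev_put(accel_dev);
+ */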
+
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
new file mode 100644
index 0000000..5c0e47a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -0,0 +1,388 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+	mutex_lock(&service_lock);
+	list_add(&service->list, &service_table);
+	mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_register() - Register acceleration service in the accel framework
+ * @service:    Pointer to the service
+ *
+ * Function adds the acceleration service to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_register(struct service_hndl *service)
+{
+	service->init_status = 0;
+	service->start_status = 0;
+	adf_service_add(service);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_register);
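+
+/*
+ * Minimal registration sketch (editor's illustration, not part of this
+ * patch; my_event_hld is a hypothetical handler for the ADF_EVENT_*
+ * notifications dispatched from adf_dev_start() and adf_dev_stop()):
+ *
+ *	static struct service_hndl my_service = {
+ *		.event_hld = my_event_hld,
+ *		.name = "my_service",
+ *	};
+ *
+ *	adf_service_register(&my_service);
+ */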
+
+static void adf_service_remove(struct service_hndl *service)
+{
+	mutex_lock(&service_lock);
+	list_del(&service->list);
+	mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_unregister() - Unregister acceleration service from the framework
+ * @service:    Pointer to the service
+ *
+ * Function removes the acceleration service from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_unregister(struct service_hndl *service)
+{
+	if (service->init_status || service->start_status) {
+		pr_err("QAT: Could not remove active service\n");
+		return -EFAULT;
+	}
+	adf_service_remove(service);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_unregister);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+		pr_info("QAT: Device not configured\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+	if (adf_ae_init(accel_dev)) {
+		pr_err("QAT: Failed to initialise Acceleration Engine\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+	if (adf_ae_fw_load(accel_dev)) {
+		pr_err("QAT: Failed to load acceleration FW\n");
+		adf_ae_fw_release(accel_dev);
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+	if (hw_data->alloc_irq(accel_dev)) {
+		pr_err("QAT: Failed to allocate interrupts\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+	/*
+	 * Subservice initialisation is divided into two stages: init and start.
+	 * This is to facilitate any ordering dependencies between services
+	 * prior to starting any of the accelerators.
+	 */
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+			pr_err("QAT: Failed to initialise service %s\n",
+			       service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, &service->init_status);
+	}
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+			pr_err("QAT: Failed to initialise service %s\n",
+			       service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, &service->init_status);
+	}
+
+	hw_data->enable_error_correction(accel_dev);
+
+	if (adf_ae_start(accel_dev)) {
+		pr_err("QAT: AE Start Failed\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+			pr_err("QAT: Failed to start service %s\n",
+			       service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, &service->start_status);
+	}
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+			pr_err("QAT: Failed to start service %s\n",
+			       service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, &service->start_status);
+	}
+
+	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+	set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+	if (qat_algs_register()) {
+		pr_err("QAT: Failed to register crypto algs\n");
+		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+		return -EFAULT;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct service_hndl *service;
+	struct list_head *list_itr;
+	int ret, wait = 0;
+
+	if (!adf_dev_started(accel_dev) &&
+	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
+		return 0;
+	}
+	clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+	if (qat_algs_unregister())
+		pr_err("QAT: Failed to unregister crypto algs\n");
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->admin)
+			continue;
+		if (!test_bit(accel_dev->accel_id, &service->start_status))
+			continue;
+		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+		if (!ret) {
+			clear_bit(accel_dev->accel_id, &service->start_status);
+		} else if (ret == -EAGAIN) {
+			wait = 1;
+			clear_bit(accel_dev->accel_id, &service->start_status);
+		}
+	}
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!service->admin)
+			continue;
+		if (!test_bit(accel_dev->accel_id, &service->start_status))
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
+			pr_err("QAT: Failed to shutdown service %s\n",
+			       service->name);
+		else
+			clear_bit(accel_dev->accel_id, &service->start_status);
+	}
+
+	if (wait)
+		msleep(100);
+
+	if (adf_dev_started(accel_dev)) {
+		if (adf_ae_stop(accel_dev))
+			pr_err("QAT: failed to stop AE\n");
+		else
+			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+	}
+
+	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+		if (adf_ae_fw_release(accel_dev))
+			pr_err("QAT: Failed to release the ucode\n");
+		else
+			clear_bit(ADF_STATUS_AE_UCODE_LOADED,
+				  &accel_dev->status);
+	}
+
+	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+		if (adf_ae_shutdown(accel_dev))
+			pr_err("QAT: Failed to shutdown Accel Engine\n");
+		else
+			clear_bit(ADF_STATUS_AE_INITIALISED,
+				  &accel_dev->status);
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->admin)
+			continue;
+		if (!test_bit(accel_dev->accel_id, &service->init_status))
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+			pr_err("QAT: Failed to shutdown service %s\n",
+			       service->name);
+		else
+			clear_bit(accel_dev->accel_id, &service->init_status);
+	}
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!service->admin)
+			continue;
+		if (!test_bit(accel_dev->accel_id, &service->init_status))
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+			pr_err("QAT: Failed to shutdown service %s\n",
+			       service->name);
+		else
+			clear_bit(accel_dev->accel_id, &service->init_status);
+	}
+
+	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+		hw_data->free_irq(accel_dev);
+		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+	}
+
+	/* Delete configuration only if not restarting */
+	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		adf_cfg_del_all(accel_dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+			pr_err("QAT: Failed to restart service %s.\n",
+			       service->name);
+	}
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+			pr_err("QAT: Failed to restart service %s.\n",
+			       service->name);
+	}
+	return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+			pr_err("QAT: Failed to restart service %s.\n",
+			       service->name);
+	}
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!service->admin)
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+			pr_err("QAT: Failed to restart service %s.\n",
+			       service->name);
+	}
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
new file mode 100644
index 0000000..5f3fa45
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -0,0 +1,567 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+	uint32_t div = data >> shift;
+	uint32_t mult = div << shift;
+
+	return data - mult;
+}
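+
+/*
+ * adf_modulo(data, shift) computes data % (1 << shift) without a divide:
+ * e.g. adf_modulo(300, 8) = 300 - 256 = 44.
+ */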
+
+static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+{
+	if (((size - 1) & addr) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+{
+	int i = ADF_MIN_RING_SIZE;
+
+	for (; i <= ADF_MAX_RING_SIZE; i++)
+		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+			return i;
+
+	return ADF_DEFAULT_RING_SIZE;
+}
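+
+/*
+ * Example: 256 messages of 64 bytes need 16384 bytes, which equals
+ * ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_16K), so
+ * adf_verify_ring_size(64, 256) returns ADF_RING_SIZE_16K (0x08).
+ */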
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock(&bank->lock);
+	if (bank->ring_mask & (1 << ring)) {
+		spin_unlock(&bank->lock);
+		return -EFAULT;
+	}
+	bank->ring_mask |= (1 << ring);
+	spin_unlock(&bank->lock);
+	return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock(&bank->lock);
+	bank->ring_mask &= ~(1 << ring);
+	spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock_bh(&bank->lock);
+	bank->irq_mask |= (1 << ring);
+	spin_unlock_bh(&bank->lock);
+	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+	WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
+			      bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock_bh(&bank->lock);
+	bank->irq_mask &= ~(1 << ring);
+	spin_unlock_bh(&bank->lock);
+	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+{
+	if (atomic_add_return(1, ring->inflights) >
+	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+		atomic_dec(ring->inflights);
+		return -EAGAIN;
+	}
+	spin_lock_bh(&ring->lock);
+	memcpy(ring->base_addr + ring->tail, msg,
+	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+	ring->tail = adf_modulo(ring->tail +
+				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+				ADF_RING_SIZE_MODULO(ring->ring_size));
+	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
+			    ring->ring_number, ring->tail);
+	spin_unlock_bh(&ring->lock);
+	return 0;
+}
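+
+/*
+ * Submission sketch (editor's illustration, not part of this patch; "req"
+ * stands for a firmware request built by the caller). -EAGAIN means the
+ * ring is full, so callers typically retry a bounded number of times:
+ *
+ *	do {
+ *		ret = adf_send_message(tx_ring, (uint32_t *)&req);
+ *	} while (ret == -EAGAIN && --retries);
+ */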
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+	uint32_t msg_counter = 0;
+	uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);
+
+	while (*msg != ADF_RING_EMPTY_SIG) {
+		ring->callback((uint32_t *)msg);
+		*msg = ADF_RING_EMPTY_SIG;
+		ring->head = adf_modulo(ring->head +
+					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+					ADF_RING_SIZE_MODULO(ring->ring_size));
+		msg_counter++;
+		msg = (uint32_t *)(ring->base_addr + ring->head);
+	}
+	if (msg_counter > 0) {
+		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
+				    ring->bank->bank_number,
+				    ring->ring_number, ring->head);
+		atomic_sub(msg_counter, ring->inflights);
+	}
+	return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+			      ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_config =
+			BUILD_RESP_RING_CONFIG(ring->ring_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+			      ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+	struct adf_etr_bank_data *bank = ring->bank;
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint64_t ring_base;
+	uint32_t ring_size_bytes =
+			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+					     ring_size_bytes, &ring->dma_addr,
+					     GFP_KERNEL);
+	if (!ring->base_addr)
+		return -ENOMEM;
+
+	memset(ring->base_addr, 0x7F, ring_size_bytes);
+	/* The base_addr has to be aligned to the size of the buffer */
+	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+		pr_err("QAT: Ring address not aligned\n");
+		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+				  ring->base_addr, ring->dma_addr);
+		return -EFAULT;
+	}
+
+	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+		adf_configure_tx_ring(ring);
+	else
+		adf_configure_rx_ring(ring);
+
+	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
+	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
+			    ring->ring_number, ring_base);
+	spin_lock_init(&ring->lock);
+	return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_size_bytes =
+			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+	if (ring->base_addr) {
+		memset(ring->base_addr, 0x7F, ring_size_bytes);
+		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+				  ring_size_bytes, ring->base_addr,
+				  ring->dma_addr);
+	}
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs,
+		    uint32_t msg_size, const char *ring_name,
+		    adf_callback_fn callback, int poll_mode,
+		    struct adf_etr_ring_data **ring_ptr)
+{
+	struct adf_etr_data *transport_data = accel_dev->transport;
+	struct adf_etr_bank_data *bank;
+	struct adf_etr_ring_data *ring;
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	uint32_t ring_num;
+	int ret;
+
+	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+		pr_err("QAT: Invalid bank number\n");
+		return -EFAULT;
+	}
+	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+		pr_err("QAT: Invalid msg size\n");
+		return -EFAULT;
+	}
+	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+		pr_err("QAT: Invalid ring size for given msg size\n");
+		return -EFAULT;
+	}
+	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+		pr_err("QAT: Section %s, no such entry : %s\n",
+		       section, ring_name);
+		return -EFAULT;
+	}
+	if (kstrtouint(val, 10, &ring_num)) {
+		pr_err("QAT: Can't get ring number\n");
+		return -EFAULT;
+	}
+
+	bank = &transport_data->banks[bank_num];
+	if (adf_reserve_ring(bank, ring_num)) {
+		pr_err("QAT: Ring %d, %s already exists.\n",
+		       ring_num, ring_name);
+		return -EFAULT;
+	}
+	ring = &bank->rings[ring_num];
+	ring->ring_number = ring_num;
+	ring->bank = bank;
+	ring->callback = callback;
+	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+	ring->head = 0;
+	ring->tail = 0;
+	atomic_set(ring->inflights, 0);
+	ret = adf_init_ring(ring);
+	if (ret)
+		goto err;
+
+	/* Enable HW arbitration for the given ring */
+	accel_dev->hw_device->hw_arb_ring_enable(ring);
+
+	if (adf_ring_debugfs_add(ring, ring_name)) {
+		pr_err("QAT: Couldn't add ring debugfs entry\n");
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/* Enable interrupts if needed */
+	if (callback && (!poll_mode))
+		adf_enable_ring_irq(bank, ring->ring_number);
+	*ring_ptr = ring;
+	return 0;
+err:
+	adf_cleanup_ring(ring);
+	adf_unreserve_ring(bank, ring_num);
+	accel_dev->hw_device->hw_arb_ring_disable(ring);
+	return ret;
+}
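+
+/*
+ * Ring-pair creation sketch (editor's illustration, not part of this
+ * patch; the section/key names and sizes are hypothetical config values):
+ *
+ *	struct adf_etr_ring_data *tx_ring, *rx_ring;
+ *
+ *	adf_create_ring(accel_dev, "SSL", bank_num, 64, 128, "Cy0RingTx0",
+ *			NULL, 0, &tx_ring);
+ *	adf_create_ring(accel_dev, "SSL", bank_num, 64, 32, "Cy0RingRx0",
+ *			resp_callback, 0, &rx_ring);
+ */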
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+	struct adf_etr_bank_data *bank = ring->bank;
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+
+	/* Disable interrupts for the given ring */
+	adf_disable_ring_irq(bank, ring->ring_number);
+
+	/* Clear PCI config space */
+	WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
+			      ring->ring_number, 0);
+	WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
+			    ring->ring_number, 0);
+	adf_ring_debugfs_rm(ring);
+	adf_unreserve_ring(bank, ring->ring_number);
+	/* Disable HW arbitration for the given ring */
+	accel_dev->hw_device->hw_arb_ring_disable(ring);
+	adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+	uint32_t empty_rings, i;
+
+	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+	empty_rings = ~empty_rings & bank->irq_mask;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
+		if (empty_rings & (1 << i))
+			adf_handle_response(&bank->rings[i]);
+	}
+}
+
+/**
+ * adf_response_handler() - Bottom half response handler
+ * @bank_addr:  Address of the ring bank for which the BH was scheduled.
+ *
+ * Function is the bottom half handler for responses from the acceleration
+ * device. There is one handler for every ring bank. Function checks all
+ * communication rings in the bank.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_response_handler(unsigned long bank_addr)
+{
+	struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+	/* Handle all the responses and re-enable IRQs */
+	adf_ring_response_handler(bank);
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+				   bank->irq_mask);
+}
+EXPORT_SYMBOL_GPL(adf_response_handler);
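+
+/*
+ * Wiring sketch (editor's illustration, not part of this patch): a device
+ * specific ISR layer would typically run this handler from a per-bank
+ * tasklet ("resp_handler" is an assumed field name here):
+ *
+ *	tasklet_init(&bank->resp_handler, adf_response_handler,
+ *		     (unsigned long)bank);
+ *	...
+ *	tasklet_hi_schedule(&bank->resp_handler);
+ */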
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+				  const char *section, const char *format,
+				  uint32_t key, uint32_t *value)
+{
+	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+		return -EFAULT;
+
+	if (kstrtouint(val_buf, 10, value))
+		return -EFAULT;
+	return 0;
+}
+
+static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
+			       const char *section, uint32_t bank_num_in_accel)
+{
+	if (adf_get_cfg_int(bank->accel_dev, section,
+			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+			    bank_num_in_accel, &bank->irq_coalesc_timer))
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+			 struct adf_etr_bank_data *bank,
+			 uint32_t bank_num, void __iomem *csr_addr)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_etr_ring_data *ring;
+	struct adf_etr_ring_data *tx_ring;
+	uint32_t i, coalesc_enabled;
+
+	memset(bank, 0, sizeof(*bank));
+	bank->bank_number = bank_num;
+	bank->csr_addr = csr_addr;
+	bank->accel_dev = accel_dev;
+	spin_lock_init(&bank->lock);
+
+	/* Always enable IRQ coalescing. This allows use of the
+	 * optimised flag and coalescing register.
+	 * If it is disabled in the config file, just use the min time value */
+	if (adf_get_cfg_int(accel_dev, "Accelerator0",
+			    ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
+			    bank_num, &coalesc_enabled) && coalesc_enabled)
+		adf_enable_coalesc(bank, "Accelerator0", bank_num);
+	else
+		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i)) {
+			ring->inflights = kzalloc_node(sizeof(atomic_t),
+						       GFP_KERNEL,
+						       accel_dev->numa_node);
+			if (!ring->inflights)
+				goto err;
+		} else {
+			if (i < hw_data->tx_rx_gap) {
+				pr_err("QAT: Invalid tx rings mask config\n");
+				goto err;
+			}
+			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+			ring->inflights = tx_ring->inflights;
+		}
+	}
+	if (adf_bank_debugfs_add(bank)) {
+		pr_err("QAT: Failed to add bank debugfs entry\n");
+		goto err;
+	}
+
+	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+	return 0;
+err:
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+			kfree(ring->inflights);
+	}
+	return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr_addr;
+	uint32_t size;
+	uint32_t num_banks = 0;
+	int i, ret;
+
+	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+				accel_dev->numa_node);
+	if (!etr_data)
+		return -ENOMEM;
+
+	num_banks = GET_MAX_BANKS(accel_dev);
+	size = num_banks * sizeof(struct adf_etr_bank_data);
+	etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+	if (!etr_data->banks) {
+		ret = -ENOMEM;
+		goto err_bank;
+	}
+
+	accel_dev->transport = etr_data;
+	i = hw_data->get_etr_bar_id(hw_data);
+	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	etr_data->debug = debugfs_create_dir("transport",
+					     accel_dev->debugfs_dir);
+	if (!etr_data->debug) {
+		pr_err("QAT: Unable to create transport debugfs entry\n");
+		ret = -ENOENT;
+		goto err_bank_debug;
+	}
+
+	for (i = 0; i < num_banks; i++) {
+		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+				    csr_addr);
+		if (ret)
+			goto err_bank_all;
+	}
+
+	return 0;
+
+err_bank_all:
+	debugfs_remove(etr_data->debug);
+err_bank_debug:
+	kfree(etr_data->banks);
+err_bank:
+	kfree(etr_data);
+	accel_dev->transport = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+	uint32_t i;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		struct adf_accel_dev *accel_dev = bank->accel_dev;
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_etr_ring_data *ring = &bank->rings[i];
+
+		if (bank->ring_mask & (1 << i))
+			adf_cleanup_ring(ring);
+
+		if (hw_data->tx_rings_mask & (1 << i))
+			kfree(ring->inflights);
+	}
+	adf_bank_debugfs_rm(bank);
+	memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+	for (i = 0; i < num_banks; i++)
+		cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+
+	if (etr_data) {
+		adf_cleanup_etr_handles(accel_dev);
+		debugfs_remove(etr_data->debug);
+		kfree(etr_data->banks);
+		kfree(etr_data);
+		accel_dev->transport = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644
index 0000000..386485b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -0,0 +1,63 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+		    const char *ring_name, adf_callback_fn callback,
+		    int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644
index 0000000..91d88d6
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -0,0 +1,160 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE	0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+				ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+	((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
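+
+/*
+ * Example: a 16KB ring of 128-byte messages holds 16384 / 128 = 128
+ * entries, so ADF_MAX_INFLIGHTS(ADF_RING_SIZE_16K, ADF_MSG_SIZE_128)
+ * evaluates to (((1 << 7) << 4) >> 4) - 1 = 127, keeping one slot free.
+ */
+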
+#define BUILD_RING_CONFIG(size)	\
+	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+	((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM)	\
+	| (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+	((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_LBASE + (ring << 2), l_base);	\
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_UBASE + (ring << 2), u_base);	\
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);	\
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_CTL, \
+			ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644
index 0000000..6b69745
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -0,0 +1,311 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
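+/*
+ * The ring and bank debugfs files below follow the seq_file iterator
+ * protocol: ->start() is called with the current position and returns
+ * SEQ_START_TOKEN once for the header line, ->next() advances the
+ * position, ->show() formats one record, and ->stop() runs when
+ * iteration ends -- here it drops the mutex taken in ->start().
+ */
+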
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	mutex_lock(&ring_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+	struct adf_etr_bank_data *bank = ring->bank;
+	uint32_t *msg = v;
+	void __iomem *csr = ring->bank->csr_addr;
+	int i, x;
+
+	if (v == SEQ_START_TOKEN) {
+		int head, tail, empty;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_puts(sfile, "------- Ring configuration -------\n");
+		seq_printf(sfile, "ring num %d, bank num %d\n",
+			   ring->ring_number, ring->bank->bank_number);
+		seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+			   head, tail, (empty & 1 << ring->ring_number)
+			   >> ring->ring_number);
+		seq_printf(sfile, "ring size %d, msg size %d\n",
+			   ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+			   ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+		seq_puts(sfile, "----------- Ring data ------------\n");
+		return 0;
+	}
+	seq_printf(sfile, "%p:", msg);
+	x = 0;
+	for (i = 0; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
+		seq_printf(sfile, " %08X", *(msg + i));
+		if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
+		    (++x == 8)) {
+			seq_printf(sfile, "\n%p:", msg + i + 1);
+			x = 0;
+		}
+	}
+	seq_puts(sfile, "\n");
+	return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+	.start = adf_ring_start,
+	.next = adf_ring_next,
+	.stop = adf_ring_stop,
+	.show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_ring_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+	.open = adf_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+	struct adf_etr_ring_debug_entry *ring_debug;
+	char entry_name[8];
+
+	ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+	if (!ring_debug)
+		return -ENOMEM;
+
+	strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+	snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+		 ring->ring_number);
+
+	ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+						ring->bank->bank_debug_dir,
+						ring, &adf_ring_debug_fops);
+	if (!ring_debug->debug) {
+		pr_err("QAT: Failed to create ring debug entry.\n");
+		kfree(ring_debug);
+		return -EFAULT;
+	}
+	ring->ring_debug = ring_debug;
+	return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+	if (ring->ring_debug) {
+		debugfs_remove(ring->ring_debug->debug);
+		kfree(ring->ring_debug);
+		ring->ring_debug = NULL;
+	}
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+	mutex_lock(&bank_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_bank_data *bank = sfile->private;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(sfile, "------- Bank %d configuration -------\n",
+			   bank->bank_number);
+	} else {
+		int ring_id = (int)*((loff_t *)v) - 1;
+		struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+		void __iomem *csr = bank->csr_addr;
+		int head, tail, empty;
+
+		if (!(bank->ring_mask & 1 << ring_id))
+			return 0;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_printf(sfile,
+			   "ring num %02d, head %04x, tail %04x, empty: %d\n",
+			   ring->ring_number, head, tail,
+			   (empty & 1 << ring->ring_number) >>
+			   ring->ring_number);
+	}
+	return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+	.start = adf_bank_start,
+	.next = adf_bank_next,
+	.stop = adf_bank_stop,
+	.show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_bank_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+	.open = adf_bank_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct dentry *parent = accel_dev->transport->debug;
+	char name[8];
+
+	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+	bank->bank_debug_dir = debugfs_create_dir(name, parent);
+	if (!bank->bank_debug_dir) {
+		pr_err("QAT: Failed to create bank debug dir.\n");
+		return -EFAULT;
+	}
+
+	bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+						   bank->bank_debug_dir, bank,
+						   &adf_bank_debug_fops);
+	if (!bank->bank_debug_cfg) {
+		pr_err("QAT: Failed to create bank debug entry.\n");
+		debugfs_remove(bank->bank_debug_dir);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+	debugfs_remove(bank->bank_debug_cfg);
+	debugfs_remove(bank->bank_debug_dir);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644
index 0000000..f854bac
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -0,0 +1,124 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+	char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+	void *base_addr;
+	atomic_t *inflights;
+	spinlock_t lock;	/* protects ring data struct */
+	adf_callback_fn callback;
+	struct adf_etr_bank_data *bank;
+	dma_addr_t dma_addr;
+	uint16_t head;
+	uint16_t tail;
+	uint8_t ring_number;
+	uint8_t ring_size;
+	uint8_t msg_size;
+	uint8_t reserved;
+	struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+	struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+	struct tasklet_struct resp_hanlder;
+	void __iomem *csr_addr;
+	struct adf_accel_dev *accel_dev;
+	uint32_t irq_coalesc_timer;
+	uint16_t ring_mask;
+	uint16_t irq_mask;
+	spinlock_t lock;	/* protects bank data struct */
+	struct dentry *bank_debug_dir;
+	struct dentry *bank_debug_cfg;
+	uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+	struct adf_etr_bank_data *banks;
+	struct dentry *debug;
+};
+
+void adf_response_handler(unsigned long bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+				       const char *name)
+{
+	return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
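+
+/*
+ * With CONFIG_DEBUG_FS disabled the inline stubs and no-op macros above
+ * compile away, so the transport code can call these debugfs helpers
+ * unconditionally instead of wrapping every call site in #ifdefs.
+ */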
+#endif
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 0000000..f1e30e2
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,330 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+		(((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+	(((flags) >> (bitpos)) & (mask))
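+
+/*
+ * Example (illustrative): QAT_FIELD_SET(flags, 1, 7, 0x1) expands to
+ *   flags = (flags & ~(0x1 << 7)) | ((1 & 0x1) << 7)
+ * so it clears bit 7 and writes the new value there, while
+ * QAT_FIELD_GET(flags, 7, 0x1) reads the same bit back.
+ */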
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+	ICP_QAT_FW_COMN_RESP_SERV_NULL,
+	ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+	ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+	ICP_QAT_FW_COMN_REQ_NULL = 0,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+	ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t serv_specif_fields[4];
+		} s1;
+	} u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+	uint64_t opaque_data;
+	uint64_t src_data_addr;
+	uint64_t dest_data_addr;
+	uint32_t src_length;
+	uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+	uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+	uint8_t resrvd1;
+	uint8_t service_cmd_id;
+	uint8_t service_type;
+	uint8_t hdr_flags;
+	uint16_t serv_specif_flags;
+	uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+	uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+	struct icp_qat_fw_comn_req_hdr comn_hdr;
+	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	struct icp_qat_fw_comn_req_mid comn_mid;
+	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+	uint8_t xlat_err_code;
+	uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+	uint8_t resrvd1;
+	uint8_t service_id;
+	uint8_t response_type;
+	uint8_t hdr_flags;
+	struct icp_qat_fw_comn_error comn_error;
+	uint8_t comn_status;
+	uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+	struct icp_qat_fw_comn_resp_hdr comn_hdr;
+	uint64_t opaque_data;
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+	icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+	icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+	icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+	icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+	ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+	ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+	QAT_FIELD_GET(hdr_flags, \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+	ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+	(hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+	ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+			QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+			QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+			QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
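+
+/*
+ * next_curr_id packs two slice ids into one byte: the id of the next
+ * slice in the processing chain sits in the high nibble and the id of
+ * the current slice in the low nibble; each accessor below updates one
+ * nibble while preserving the other.
+ */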
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+	>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+	{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	 & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+	{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+	((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+	QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+	(((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+	QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+	(((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+	QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+	(((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+	QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+	QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+	QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+	QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+	QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+	ICP_QAT_FW_SLICE_NULL = 0,
+	ICP_QAT_FW_SLICE_CIPHER = 1,
+	ICP_QAT_FW_SLICE_AUTH = 2,
+	ICP_QAT_FW_SLICE_DRAM_RD = 3,
+	ICP_QAT_FW_SLICE_DRAM_WR = 4,
+	ICP_QAT_FW_SLICE_COMP = 5,
+	ICP_QAT_FW_SLICE_XLAT = 6,
+	ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644
index 0000000..72a59fa
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -0,0 +1,137 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+	ICP_QAT_FW_INIT_ME = 0,
+	ICP_QAT_FW_TRNG_ENABLE = 1,
+	ICP_QAT_FW_TRNG_DISABLE = 2,
+	ICP_QAT_FW_CONSTANTS_CFG = 3,
+	ICP_QAT_FW_STATUS_GET = 4,
+	ICP_QAT_FW_COUNTERS_GET = 5,
+	ICP_QAT_FW_LOOPBACK = 6,
+	ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+	ICP_QAT_FW_HEARTBEAT_GET = 8
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+	ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+	ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+	uint16_t init_cfg_sz;
+	uint8_t resrvd1;
+	uint8_t init_admin_cmd_id;
+	uint32_t resrvd2;
+	uint64_t opaque_data;
+	uint64_t init_cfg_ptr;
+	uint64_t resrvd3;
+};
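+
+/*
+ * Note (illustrative): with natural alignment the request above packs
+ * to 32 bytes -- a 16-bit size field, two bytes of reserved/command id,
+ * a 32-bit reserved word, then three 64-bit words.
+ */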
+
+struct icp_qat_fw_init_admin_resp_hdr {
+	uint8_t flags;
+	uint8_t resrvd1;
+	uint8_t status;
+	uint8_t init_admin_cmd_id;
+};
+
+struct icp_qat_fw_init_admin_resp_pars {
+	union {
+		uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint32_t version_patch_num;
+			uint8_t context_id;
+			uint8_t ae_id;
+			uint16_t resrvd1;
+			uint64_t resrvd2;
+		} s1;
+		struct {
+			uint64_t req_rec_count;
+			uint64_t resp_sent_count;
+		} s2;
+	} u;
+};
+
+struct icp_qat_fw_init_admin_resp {
+	struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+	union {
+		uint32_t resrvd2;
+		struct {
+			uint16_t version_minor_num;
+			uint16_t version_major_num;
+		} s;
+	} u;
+	uint64_t opaque_data;
+	struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+	ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+	ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, \
+		 ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+		 ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
new file mode 100644
index 0000000..c8d2669
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
@@ -0,0 +1,412 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+	ICP_QAT_FW_LA_CMD_CIPHER = 0,
+	ICP_QAT_FW_LA_CMD_AUTH = 1,
+	ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+	ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+	ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+	ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+	ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+	ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+	ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+	ICP_QAT_FW_LA_CMD_MGF1 = 9,
+	ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+	ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+	ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+	struct icp_qat_fw_comn_req_hdr comn_hdr;
+	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	struct icp_qat_fw_comn_req_mid comn_mid;
+	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS	10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO	2
+#define ICP_QAT_FW_LA_CCM_PROTO	1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+	cmp_auth, ret_auth, update_state, \
+	ciph_iv, ciphcfg, partial) \
+	(((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+	((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+	QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+	((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+	QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+	((proto & QAT_LA_PROTO_MASK) << \
+	QAT_LA_PROTO_BITPOS)	| \
+	((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+	QAT_LA_CMP_AUTH_RES_BITPOS) | \
+	((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+	QAT_LA_RET_AUTH_RES_BITPOS) | \
+	((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+	QAT_LA_UPDATE_STATE_BITPOS) | \
+	((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+	QAT_LA_CIPH_IV_FLD_BITPOS) | \
+	((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+	((partial & QAT_LA_PARTIAL_MASK) << \
+	QAT_LA_PARTIAL_BITPOS))
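+
+/*
+ * Worked example (illustrative): a GCM request with a 12-octet IV
+ * (bit 11), digest in buffer (bit 10), proto = ICP_QAT_FW_LA_GCM_PROTO
+ * (2 << 7), a returned auth result (bit 5) and a 16-byte IV field
+ * (bit 2) builds as 0x800 | 0x400 | 0x100 | 0x20 | 0x4 = 0xd24, with
+ * every other field left zero.
+ */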
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+	QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+	QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+	QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+	QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+	QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+	QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+	QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+	QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+	QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+	QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+	QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+	QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+	QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+	QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+	QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		} s1;
+	} u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		} sl;
+	} u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+	uint8_t cipher_state_sz;
+	uint8_t cipher_key_sz;
+	uint8_t cipher_cfg_offset;
+	uint8_t next_curr_id;
+	uint8_t cipher_padding_sz;
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+	uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+	uint32_t resrvd1;
+	uint8_t resrvd2;
+	uint8_t hash_flags;
+	uint8_t hash_cfg_offset;
+	uint8_t next_curr_id;
+	uint8_t resrvd3;
+	uint8_t outer_prefix_sz;
+	uint8_t final_sz;
+	uint8_t inner_res_sz;
+	uint8_t resrvd4;
+	uint8_t inner_state1_sz;
+	uint8_t inner_state2_offset;
+	uint8_t inner_state2_sz;
+	uint8_t outer_config_offset;
+	uint8_t outer_state1_sz;
+	uint8_t outer_res_sz;
+	uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+	uint8_t cipher_state_sz;
+	uint8_t cipher_key_sz;
+	uint8_t cipher_cfg_offset;
+	uint8_t next_curr_id_cipher;
+	uint8_t cipher_padding_sz;
+	uint8_t hash_flags;
+	uint8_t hash_cfg_offset;
+	uint8_t next_curr_id_auth;
+	uint8_t resrvd1;
+	uint8_t outer_prefix_sz;
+	uint8_t final_sz;
+	uint8_t inner_res_sz;
+	uint8_t resrvd2;
+	uint8_t inner_state1_sz;
+	uint8_t inner_state2_offset;
+	uint8_t inner_state2_sz;
+	uint8_t outer_config_offset;
+	uint8_t outer_state1_sz;
+	uint8_t outer_res_sz;
+	uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX	240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+	(sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+	uint32_t cipher_offset;
+	uint32_t cipher_length;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+	} u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+	uint32_t auth_off;
+	uint32_t auth_len;
+	union {
+		uint64_t auth_partial_st_prefix;
+		uint64_t aad_adr;
+	} u1;
+	uint64_t auth_res_addr;
+	union {
+		uint8_t inner_prefix_sz;
+		uint8_t aad_sz;
+	} u2;
+	uint8_t resrvd1;
+	uint8_t hash_state_sz;
+	uint8_t auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+	union {
+		uint8_t inner_prefix_sz;
+		uint8_t aad_sz;
+	} u2;
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+	struct icp_qat_fw_comn_resp_hdr comn_resp;
+	uint64_t opaque_data;
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+	  ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+	>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644
index 0000000..5e1aa40
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -0,0 +1,78 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+	unsigned int state;
+	unsigned int ustore_size;
+	unsigned int free_addr;
+	unsigned int free_size;
+	unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+	struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+	unsigned int ae_mask;
+	unsigned int slice_mask;
+	unsigned int revision_id;
+	unsigned int ae_max_num;
+	unsigned int upc_mask;
+	unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_handle {
+	struct icp_qat_fw_loader_hal_handle *hal_handle;
+	void *obj_handle;
+	void __iomem *hal_sram_addr_v;
+	void __iomem *hal_cap_g_ctl_csr_addr_v;
+	void __iomem *hal_cap_ae_xfer_csr_addr_v;
+	void __iomem *hal_cap_ae_local_csr_addr_v;
+	void __iomem *hal_ep_csr_addr_v;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
new file mode 100644
index 0000000..85b6d24
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h
@@ -0,0 +1,132 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+	MISC_CONTROL = 0x04,
+	ICP_RESET = 0x0c,
+	ICP_GLOBAL_CLK_ENABLE = 0x50
+};
+
+enum hal_ae_csr {
+	USTORE_ADDRESS = 0x000,
+	USTORE_DATA_LOWER = 0x004,
+	USTORE_DATA_UPPER = 0x008,
+	ALU_OUT = 0x010,
+	CTX_ARB_CNTL = 0x014,
+	CTX_ENABLES = 0x018,
+	CC_ENABLE = 0x01c,
+	CSR_CTX_POINTER = 0x020,
+	CTX_STS_INDIRECT = 0x040,
+	ACTIVE_CTX_STATUS = 0x044,
+	CTX_SIG_EVENTS_INDIRECT = 0x048,
+	CTX_SIG_EVENTS_ACTIVE = 0x04c,
+	CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+	LM_ADDR_0_INDIRECT = 0x060,
+	LM_ADDR_1_INDIRECT = 0x068,
+	INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+	INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+	FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+	TIMESTAMP_LOW = 0x0c0,
+	TIMESTAMP_HIGH = 0x0c4,
+	PROFILE_COUNT = 0x144,
+	SIGNATURE_ENABLE = 0x150,
+	AE_MISC_CONTROL = 0x160,
+	LOCAL_CSR_STATUS = 0x180,
+};
+
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define GLOBAL_CSR                0xA00
+
+#define SET_CAP_CSR(handle, csr, val) \
+	ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+	ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
+#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
+#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
+#define AE_CSR(handle, ae) \
+	(handle->hal_cap_ae_local_csr_addr_v + \
+	((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
+#define SET_AE_CSR(handle, ae, csr, val) \
+	ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
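+
+/*
+ * Example (illustrative): each accelerator engine exposes a 4 KB window
+ * of local CSRs, so with ae_mask covering index 3,
+ * AE_CSR_ADDR(handle, 3, CTX_ENABLES) resolves to
+ * hal_cap_ae_local_csr_addr_v + (3 << 12) + 0x018.
+ */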
+#define AE_XFER(handle, ae) \
+	(handle->hal_cap_ae_xfer_csr_addr_v + \
+	((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+	((reg & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+	ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+	ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
+#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
new file mode 100644
index 0000000..5031f8c
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+	ICP_QAT_HW_AE_0 = 0,
+	ICP_QAT_HW_AE_1 = 1,
+	ICP_QAT_HW_AE_2 = 2,
+	ICP_QAT_HW_AE_3 = 3,
+	ICP_QAT_HW_AE_4 = 4,
+	ICP_QAT_HW_AE_5 = 5,
+	ICP_QAT_HW_AE_6 = 6,
+	ICP_QAT_HW_AE_7 = 7,
+	ICP_QAT_HW_AE_8 = 8,
+	ICP_QAT_HW_AE_9 = 9,
+	ICP_QAT_HW_AE_10 = 10,
+	ICP_QAT_HW_AE_11 = 11,
+	ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+	ICP_QAT_HW_QAT_0 = 0,
+	ICP_QAT_HW_QAT_1 = 1,
+	ICP_QAT_HW_QAT_2 = 2,
+	ICP_QAT_HW_QAT_3 = 3,
+	ICP_QAT_HW_QAT_4 = 4,
+	ICP_QAT_HW_QAT_5 = 5,
+	ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+	ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+	ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+	ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+	ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+	ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+	ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+	ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+	ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+	ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+	ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+	ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+	ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+	ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+	ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+	ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+	ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+	ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+	ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+	ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+	ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+	ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+	ICP_QAT_HW_AUTH_MODE0 = 0,
+	ICP_QAT_HW_AUTH_MODE1 = 1,
+	ICP_QAT_HW_AUTH_MODE2 = 2,
+	ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+	uint32_t config;
+	uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+	(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+	((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+	(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+	 QAT_AUTH_ALGO_SHA3_BITPOS) | \
+	 (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+	(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+	& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+	((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+	__be32 counter;
+	uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+	(((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+	struct icp_qat_hw_auth_config auth_config;
+	struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & ~((n) - 1))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+	ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+	struct icp_qat_hw_auth_setup inner_setup;
+	uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+	struct icp_qat_hw_auth_setup outer_setup;
+	uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+	struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+	ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+	ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+	ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+	ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+	ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+	ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+	ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+	ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+	ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+	ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+	ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+	ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+	ICP_QAT_HW_CIPHER_F8_MODE = 3,
+	ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+	ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+	uint32_t val;
+	uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+	ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+	ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+	ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+	ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+	(((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+	((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+	((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+	((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+	QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+	QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+	struct icp_qat_hw_cipher_config cipher_config;
+	uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+	struct icp_qat_hw_cipher_aes256_f8 aes;
+};
+#endif
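
For reference, a minimal sketch (illustrative only, not part of the patch) of how the
CONFIG_BUILD macros above pack their operands into the 32-bit hardware config word;
the value follows directly from the BITPOS/MASK definitions:

    #include <linux/types.h>
    #include "icp_qat_hw.h"

    /* AES-256 CBC decrypt with key conversion:
     * mode(1) << 4 | algo(5) << 0 | convert(1) << 9 | dir(1) << 8 = 0x315
     */
    static inline uint32_t example_cipher_config(void)
    {
            return ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
                                                  ICP_QAT_HW_CIPHER_ALGO_AES256,
                                                  ICP_QAT_HW_CIPHER_KEY_CONVERT,
                                                  ICP_QAT_HW_CIPHER_DECRYPT);
    }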
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
new file mode 100644
index 0000000..2132a8c
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -0,0 +1,377 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_C_CPU_TYPE     0x00400000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_NN_REG   128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_NN_MODE_NOTCARE   0xff
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_GTID        "UOF_GTID"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_MSEG        "UOF_MSEG"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+
+enum icp_qat_uof_mem_region {
+	ICP_QAT_UOF_SRAM_REGION = 0x0,
+	ICP_QAT_UOF_LMEM_REGION = 0x3,
+	ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+	ICP_NO_DEST,
+	ICP_GPA_REL,
+	ICP_GPA_ABS,
+	ICP_GPB_REL,
+	ICP_GPB_ABS,
+	ICP_SR_REL,
+	ICP_SR_RD_REL,
+	ICP_SR_WR_REL,
+	ICP_SR_ABS,
+	ICP_SR_RD_ABS,
+	ICP_SR_WR_ABS,
+	ICP_DR_REL,
+	ICP_DR_RD_REL,
+	ICP_DR_WR_REL,
+	ICP_DR_ABS,
+	ICP_DR_RD_ABS,
+	ICP_DR_WR_ABS,
+	ICP_LMEM,
+	ICP_LMEM0,
+	ICP_LMEM1,
+	ICP_NEIGH_REL,
+};
+
+struct icp_qat_uclo_page {
+	struct icp_qat_uclo_encap_page *encap_page;
+	struct icp_qat_uclo_region *region;
+	unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+	struct icp_qat_uclo_page *loaded;
+	struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+	struct icp_qat_uclo_region *region;
+	struct icp_qat_uclo_page *page;
+	struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+	struct icp_qat_uclo_encapme *encap_image;
+	unsigned int ctx_mask_assigned;
+	unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+	unsigned int slice_num;
+	unsigned int eff_ustore_size;
+	struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+	char *beg_uof;
+	struct icp_qat_uof_objhdr *obj_hdr;
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+	struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+	unsigned int start_addr;
+	unsigned int words_num;
+	uint64_t micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+	unsigned int def_page;
+	unsigned int page_region;
+	unsigned int beg_addr_v;
+	unsigned int beg_addr_p;
+	unsigned int micro_words_num;
+	unsigned int uwblock_num;
+	struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+	struct icp_qat_uof_image *img_ptr;
+	struct icp_qat_uclo_encap_page *page;
+	unsigned int ae_reg_num;
+	struct icp_qat_uof_ae_reg *ae_reg;
+	unsigned int init_regsym_num;
+	struct icp_qat_uof_init_regsym *init_regsym;
+	unsigned int sbreak_num;
+	struct icp_qat_uof_sbreak *sbreak;
+	unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+	unsigned int entry_num;
+	struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+	char *file_buff;
+	unsigned int checksum;
+	unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+	unsigned int table_len;
+	unsigned int reserved;
+	uint64_t strings;
+};
+
+struct icp_qat_uclo_objhandle {
+	unsigned int prod_type;
+	unsigned int prod_rev;
+	struct icp_qat_uclo_objhdr *obj_hdr;
+	struct icp_qat_uof_encap_obj encap_uof_obj;
+	struct icp_qat_uof_strtable str_table;
+	struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+	struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+	struct icp_qat_uclo_init_mem_table init_mem_tab;
+	struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+	struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+	int uimage_num;
+	int uword_in_bytes;
+	int global_inited;
+	unsigned int ae_num;
+	unsigned int ustore_phy_size;
+	void *obj_buf;
+	uint64_t *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+	unsigned int start_addr;
+	unsigned int words_num;
+	unsigned int uword_offset;
+	unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+	unsigned short file_id;
+	unsigned short reserved1;
+	char min_ver;
+	char maj_ver;
+	unsigned short reserved2;
+	unsigned short max_chunks;
+	unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+	char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+	unsigned int checksum;
+	unsigned int offset;
+	unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+	unsigned int cpu_type;
+	unsigned short min_cpu_ver;
+	unsigned short max_cpu_ver;
+	short max_chunks;
+	short num_chunks;
+	unsigned int reserved1;
+	unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+	char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+	unsigned int offset;
+	unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+	unsigned int offset_in_byte;
+	unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+	unsigned int sym_name;
+	char region;
+	char scope;
+	unsigned short reserved1;
+	unsigned int addr;
+	unsigned int num_in_bytes;
+	unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+	unsigned int sym_name;
+	char init_type;
+	char value_type;
+	char reg_type;
+	unsigned char ctx;
+	unsigned int reg_addr;
+	unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+	unsigned int sram_base;
+	unsigned int sram_size;
+	unsigned int sram_alignment;
+	unsigned int sdram_base;
+	unsigned int sdram_size;
+	unsigned int sdram_alignment;
+	unsigned int sdram1_base;
+	unsigned int sdram1_size;
+	unsigned int sdram1_alignment;
+	unsigned int scratch_base;
+	unsigned int scratch_size;
+	unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+	char tool_id[ICP_QAT_UOF_OBJID_LEN];
+	int tool_ver;
+	unsigned int reserved1;
+	unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+	unsigned int page_num;
+	unsigned int virt_uaddr;
+	unsigned char sbreak_type;
+	unsigned char reg_type;
+	unsigned short reserved1;
+	unsigned int addr_offset;
+	unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+	unsigned int page_region;
+	unsigned int page_num;
+	unsigned char def_page;
+	unsigned char reserved2;
+	unsigned short reserved1;
+	unsigned int beg_addr_v;
+	unsigned int beg_addr_p;
+	unsigned int neigh_reg_tab_offset;
+	unsigned int uc_var_tab_offset;
+	unsigned int imp_var_tab_offset;
+	unsigned int imp_expr_tab_offset;
+	unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+	unsigned int img_name;
+	unsigned int ae_assigned;
+	unsigned int ctx_assigned;
+	unsigned int cpu_type;
+	unsigned int entry_address;
+	unsigned int fill_pattern[2];
+	unsigned int reloadable_size;
+	unsigned char sensitivity;
+	unsigned char reserved;
+	unsigned short ae_mode;
+	unsigned short max_ver;
+	unsigned short min_ver;
+	unsigned short image_attrib;
+	unsigned short reserved2;
+	unsigned short page_region_num;
+	unsigned short numpages;
+	unsigned int reg_tab_offset;
+	unsigned int init_reg_sym_tab;
+	unsigned int sbreak_tab;
+	unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+	unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+	unsigned int name;
+	unsigned int vis_name;
+	unsigned short type;
+	unsigned short addr;
+	unsigned short access_mode;
+	unsigned char visible;
+	unsigned char reserved1;
+	unsigned short ref_count;
+	unsigned short reserved2;
+	unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+	unsigned int micro_words_num;
+	unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+	unsigned int ae;
+	unsigned int addr;
+	unsigned int *value;
+	unsigned int size;
+	struct icp_qat_uof_batch_init *next;
+};
+#endif
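
Likewise illustrative and not part of the patch: a hedged sketch of the sanity checks a
loader could apply to a UOF file header using the constants above, assuming the header
has already been read out of the firmware image (example_check_uof_filehdr is a
hypothetical name):

    #include <linux/errno.h>
    #include "icp_qat_uclo.h"

    static int example_check_uof_filehdr(const struct icp_qat_uof_filehdr *hdr)
    {
            if (hdr->file_id != ICP_QAT_UOF_FID)
                    return -EINVAL;         /* wrong magic */
            if (hdr->maj_ver != ICP_QAT_UOF_MAJVER ||
                hdr->min_ver != ICP_QAT_UOF_MINVER)
                    return -EINVAL;         /* unsupported UOF revision */
            if (hdr->num_chunks > hdr->max_chunks)
                    return -EINVAL;         /* corrupt chunk table */
            return 0;
    }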
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
new file mode 100644
index 0000000..59df488
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -0,0 +1,1038 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/rng.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#define QAT_AES_HW_CONFIG_ENC(alg) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+			ICP_QAT_HW_CIPHER_NO_CONVERT, \
+			ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+			ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+			ICP_QAT_HW_CIPHER_DECRYPT)
+
+static atomic_t active_dev;
+
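+/* Flat buffer descriptor consumed by the firmware: a DMA address/length
+ * pair, gathered into the 64-byte aligned qat_alg_buf_list below. */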
+struct qat_alg_buf {
+	uint32_t len;
+	uint32_t resrvd;
+	uint64_t addr;
+} __packed;
+
+struct qat_alg_buf_list {
+	uint64_t resrvd;
+	uint32_t num_bufs;
+	uint32_t num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+/* Common content descriptor */
+struct qat_alg_cd {
+	union {
+		struct qat_enc { /* Encrypt content desc */
+			struct icp_qat_hw_cipher_algo_blk cipher;
+			struct icp_qat_hw_auth_algo_blk hash;
+		} qat_enc_cd;
+		struct qat_dec { /* Decrypt content desc */
+			struct icp_qat_hw_auth_algo_blk hash;
+			struct icp_qat_hw_cipher_algo_blk cipher;
+		} qat_dec_cd;
+	};
+} __aligned(64);
+
+#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
+
+struct qat_auth_state {
+	uint8_t data[MAX_AUTH_STATE_SIZE];
+} __aligned(64);
+
+struct qat_alg_session_ctx {
+	struct qat_alg_cd *enc_cd;
+	dma_addr_t enc_cd_paddr;
+	struct qat_alg_cd *dec_cd;
+	dma_addr_t dec_cd_paddr;
+	struct qat_auth_state *auth_hw_state_enc;
+	dma_addr_t auth_state_enc_paddr;
+	struct qat_auth_state *auth_hw_state_dec;
+	dma_addr_t auth_state_dec_paddr;
+	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
+	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
+	struct qat_crypto_instance *inst;
+	struct crypto_tfm *tfm;
+	struct crypto_shash *hash_tfm;
+	enum icp_qat_hw_auth_algo qat_hash_alg;
+	uint8_t salt[AES_BLOCK_SIZE];
+	spinlock_t lock;	/* protects qat_alg_session_ctx struct */
+};
+
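+/* NUMA hint for the calling CPU: the physical package id is used as the
+ * node when picking a crypto instance. */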
+static int get_current_node(void)
+{
+	return cpu_data(current_thread_info()->cpu).phys_proc_id;
+}
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+	switch (qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		return ICP_QAT_HW_SHA1_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		return ICP_QAT_HW_SHA256_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		return ICP_QAT_HW_SHA512_STATE1_SZ;
+	default:
+		return -EFAULT;
+	}
+	return -EFAULT;
+}
+
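+/*
+ * Precompute the HMAC inner and outer partial digests: hash the key
+ * XORed with the ipad/opad constants and export the resulting
+ * midstates into state1 of the content descriptor, so the hardware
+ * can resume the hash from there.
+ */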
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+				  struct qat_alg_session_ctx *ctx,
+				  const uint8_t *auth_key,
+				  unsigned int auth_keylen, uint8_t *auth_state)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
+	} desc;
+	struct sha1_state sha1;
+	struct sha256_state sha256;
+	struct sha512_state sha512;
+	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+	uint8_t *ipad = auth_state;
+	uint8_t *opad = ipad + block_size;
+	__be32 *hash_state_out;
+	__be64 *hash512_state_out;
+	int i, offset;
+
+	desc.shash.tfm = ctx->hash_tfm;
+	desc.shash.flags = 0x0;
+
+	if (auth_keylen > block_size) {
+		char buff[SHA512_BLOCK_SIZE];
+		int ret = crypto_shash_digest(&desc.shash, auth_key,
+					      auth_keylen, buff);
+		if (ret)
+			return ret;
+
+		memcpy(ipad, buff, digest_size);
+		memcpy(opad, buff, digest_size);
+		memset(ipad + digest_size, 0, block_size - digest_size);
+		memset(opad + digest_size, 0, block_size - digest_size);
+	} else {
+		memcpy(ipad, auth_key, auth_keylen);
+		memcpy(opad, auth_key, auth_keylen);
+		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
+		memset(opad + auth_keylen, 0, block_size - auth_keylen);
+	}
+
+	for (i = 0; i < block_size; i++) {
+		char *ipad_ptr = ipad + i;
+		char *opad_ptr = opad + i;
+		*ipad_ptr ^= 0x36;
+		*opad_ptr ^= 0x5C;
+	}
+
+	if (crypto_shash_init(&desc.shash))
+		return -EFAULT;
+
+	if (crypto_shash_update(&desc.shash, ipad, block_size))
+		return -EFAULT;
+
+	hash_state_out = (__be32 *)hash->sha.state1;
+	hash512_state_out = (__be64 *)hash_state_out;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (crypto_shash_export(&desc.shash, &sha1))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (crypto_shash_export(&desc.shash, &sha256))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (crypto_shash_export(&desc.shash, &sha512))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+		break;
+	default:
+		return -EFAULT;
+	}
+
+	if (crypto_shash_init(&desc.shash))
+		return -EFAULT;
+
+	if (crypto_shash_update(&desc.shash, opad, block_size))
+		return -EFAULT;
+
+	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+	hash512_state_out = (__be64 *)hash_state_out;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (crypto_shash_export(&desc.shash, &sha1))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (crypto_shash_export(&desc.shash, &sha256))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (crypto_shash_export(&desc.shash, &sha512))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+		break;
+	default:
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+	header->hdr_flags =
+		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+					    QAT_COMN_PTR_TYPE_SGL);
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+				  ICP_QAT_FW_LA_PARTIAL_NONE);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
+				    int alg, struct crypto_authenc_keys *keys)
+{
+	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+	struct icp_qat_hw_auth_algo_blk *hash =
+		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(struct icp_qat_fw_la_auth_req_params *)
+		((char *)&req_tmpl->serv_specif_rqpars +
+		 sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+	/* CD setup */
+	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
+	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+	hash->sha.inner_setup.auth_config.config =
+		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+					     ctx->qat_hash_alg, digestsize);
+	hash->sha.inner_setup.auth_counter.counter =
+		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+				   (uint8_t *)ctx->auth_hw_state_enc))
+		return -EFAULT;
+
+	/* Request setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_RET_AUTH_RES);
+	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+	/* Cipher CD config setup */
+	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	/* Auth CD config setup */
+	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+	hash_cd_ctrl->inner_res_sz = digestsize;
+	hash_cd_ctrl->final_sz = digestsize;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		hash_cd_ctrl->inner_state1_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+		hash_cd_ctrl->inner_state2_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+		break;
+	default:
+		break;
+	}
+	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+			((sizeof(struct icp_qat_hw_auth_setup) +
+			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
+			sizeof(struct icp_qat_hw_auth_counter) +
+			round_up(hash_cd_ctrl->inner_state1_sz, 8);
+	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	return 0;
+}
+
+static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
+				    int alg, struct crypto_authenc_keys *keys)
+{
+	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+	struct icp_qat_hw_cipher_algo_blk *cipher =
+		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+		sizeof(struct icp_qat_hw_auth_setup) +
+		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(struct icp_qat_fw_la_auth_req_params *)
+		((char *)&req_tmpl->serv_specif_rqpars +
+		sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+	/* CD setup */
+	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
+	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+	hash->sha.inner_setup.auth_config.config =
+		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+					     ctx->qat_hash_alg,
+					     digestsize);
+	hash->sha.inner_setup.auth_counter.counter =
+		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+				   (uint8_t *)ctx->auth_hw_state_dec))
+		return -EFAULT;
+
+	/* Request setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_CMP_AUTH_RES);
+	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+	/* Cipher CD config setup */
+	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cipher_cd_ctrl->cipher_cfg_offset =
+		(sizeof(struct icp_qat_hw_auth_setup) +
+		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+	/* Auth CD config setup */
+	hash_cd_ctrl->hash_cfg_offset = 0;
+	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+	hash_cd_ctrl->inner_res_sz = digestsize;
+	hash_cd_ctrl->final_sz = digestsize;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		hash_cd_ctrl->inner_state1_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+		hash_cd_ctrl->inner_state2_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+		break;
+	default:
+		break;
+	}
+
+	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+			((sizeof(struct icp_qat_hw_auth_setup) +
+			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr +
+			sizeof(struct icp_qat_hw_auth_counter) +
+			round_up(hash_cd_ctrl->inner_state1_sz, 8);
+	auth_param->auth_res_sz = digestsize;
+	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	return 0;
+}
+
+static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
+				 const uint8_t *key, unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	int alg;
+
+	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
+		return -EFAULT;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen))
+		goto bad_key;
+
+	switch (keys.enckeylen) {
+	case AES_KEYSIZE_128:
+		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+		break;
+	case AES_KEYSIZE_192:
+		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+		break;
+	case AES_KEYSIZE_256:
+		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+		break;
+	default:
+		goto bad_key;
+	}
+
+	if (qat_alg_init_enc_session(ctx, alg, &keys))
+		goto error;
+
+	if (qat_alg_init_dec_session(ctx, alg, &keys))
+		goto error;
+
+	return 0;
+bad_key:
+	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+error:
+	return -EFAULT;
+}
+
+static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
+			  unsigned int keylen)
+{
+	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev;
+
+	spin_lock(&ctx->lock);
+	if (ctx->enc_cd) {
+		/* rekeying */
+		dev = &GET_DEV(ctx->inst->accel_dev);
+		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+		memset(ctx->auth_hw_state_enc, 0,
+		       sizeof(struct qat_auth_state));
+		memset(ctx->auth_hw_state_dec, 0,
+		       sizeof(struct qat_auth_state));
+		memset(&ctx->enc_fw_req_tmpl, 0,
+		       sizeof(struct icp_qat_fw_la_bulk_req));
+		memset(&ctx->dec_fw_req_tmpl, 0,
+		       sizeof(struct icp_qat_fw_la_bulk_req));
+	} else {
+		/* new key */
+		int node = get_current_node();
+		struct qat_crypto_instance *inst =
+				qat_crypto_get_instance_node(node);
+		if (!inst) {
+			spin_unlock(&ctx->lock);
+			return -EINVAL;
+		}
+
+		dev = &GET_DEV(inst->accel_dev);
+		ctx->inst = inst;
+		ctx->enc_cd = dma_zalloc_coherent(dev,
+						  sizeof(struct qat_alg_cd),
+						  &ctx->enc_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->enc_cd) {
+			spin_unlock(&ctx->lock);
+			return -ENOMEM;
+		}
+		ctx->dec_cd = dma_zalloc_coherent(dev,
+						  sizeof(struct qat_alg_cd),
+						  &ctx->dec_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->dec_cd) {
+			spin_unlock(&ctx->lock);
+			goto out_free_enc;
+		}
+		ctx->auth_hw_state_enc =
+			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+					    &ctx->auth_state_enc_paddr,
+					    GFP_ATOMIC);
+		if (!ctx->auth_hw_state_enc) {
+			spin_unlock(&ctx->lock);
+			goto out_free_dec;
+		}
+		ctx->auth_hw_state_dec =
+			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+					    &ctx->auth_state_dec_paddr,
+					    GFP_ATOMIC);
+		if (!ctx->auth_hw_state_dec) {
+			spin_unlock(&ctx->lock);
+			goto out_free_auth_enc;
+		}
+	}
+	spin_unlock(&ctx->lock);
+	if (qat_alg_init_sessions(ctx, key, keylen))
+		goto out_free_all;
+
+	return 0;
+
+out_free_all:
+	dma_free_coherent(dev, sizeof(struct qat_auth_state),
+			  ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
+	ctx->auth_hw_state_dec = NULL;
+out_free_auth_enc:
+	dma_free_coherent(dev, sizeof(struct qat_auth_state),
+			  ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
+	ctx->auth_hw_state_enc = NULL;
+out_free_dec:
+	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+			  ctx->dec_cd, ctx->dec_cd_paddr);
+	ctx->dec_cd = NULL;
+out_free_enc:
+	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+			  ctx->enc_cd, ctx->enc_cd_paddr);
+	ctx->enc_cd = NULL;
+	return -ENOMEM;
+}
+
+static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+			      struct qat_crypto_request *qat_req)
+{
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_alg_buf_list *bl = qat_req->buf.bl;
+	struct qat_alg_buf_list *blout = qat_req->buf.blout;
+	dma_addr_t blp = qat_req->buf.blp;
+	dma_addr_t blpout = qat_req->buf.bloutp;
+	size_t sz = qat_req->buf.sz;
+	int i, bufs = bl->num_bufs;
+
+	for (i = 0; i < bl->num_bufs; i++)
+		dma_unmap_single(dev, bl->bufers[i].addr,
+				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+	kfree(bl);
+	if (blp != blpout) {
+		/* For out-of-place operation, DMA unmap only the data */
+		int bufless = bufs - blout->num_mapped_bufs;
+
+		for (i = bufless; i < bufs; i++) {
+			dma_unmap_single(dev, blout->bufers[i].addr,
+					 blout->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+		}
+		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+		kfree(blout);
+	}
+}
+
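+/*
+ * Map the assoc scatterlist, the IV and the src scatterlist into a
+ * firmware buffer list; for out-of-place requests a second list is
+ * built for dst that reuses the assoc and IV mappings.
+ */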
+static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+			       struct scatterlist *assoc,
+			       struct scatterlist *sgl,
+			       struct scatterlist *sglout, uint8_t *iv,
+			       uint8_t ivlen,
+			       struct qat_crypto_request *qat_req)
+{
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+	struct qat_alg_buf_list *bufl;
+	struct qat_alg_buf_list *buflout = NULL;
+	dma_addr_t blp;
+	dma_addr_t bloutp = 0;
+	struct scatterlist *sg;
+	size_t sz = sizeof(struct qat_alg_buf_list) +
+			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+
+	if (unlikely(!n))
+		return -EINVAL;
+
+	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+	if (unlikely(!bufl))
+		return -ENOMEM;
+
+	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, blp)))
+		goto err;
+
+	for_each_sg(assoc, sg, assoc_n, i) {
+		bufl->bufers[bufs].addr = dma_map_single(dev,
+							 sg_virt(sg),
+							 sg->length,
+							 DMA_BIDIRECTIONAL);
+		bufl->bufers[bufs].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+			goto err;
+		bufs++;
+	}
+	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+						 DMA_BIDIRECTIONAL);
+	bufl->bufers[bufs].len = ivlen;
+	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+		goto err;
+	bufs++;
+
+	for_each_sg(sgl, sg, n, i) {
+		int y = i + bufs;
+
+		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+						      sg->length,
+						      DMA_BIDIRECTIONAL);
+		bufl->bufers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+			goto err;
+	}
+	bufl->num_bufs = n + bufs;
+	qat_req->buf.bl = bufl;
+	qat_req->buf.blp = blp;
+	qat_req->buf.sz = sz;
+	/* Handle out of place operation */
+	if (sgl != sglout) {
+		struct qat_alg_buf *bufers;
+
+		buflout = kmalloc_node(sz, GFP_ATOMIC,
+				       inst->accel_dev->numa_node);
+		if (unlikely(!buflout))
+			goto err;
+		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, bloutp)))
+			goto err;
+		bufers = buflout->bufers;
+		/* For out-of-place operation, DMA map only the data and
+		 * reuse the assoc and iv mappings */
+		for (i = 0; i < bufs; i++) {
+			bufers[i].len = bufl->bufers[i].len;
+			bufers[i].addr = bufl->bufers[i].addr;
+		}
+		for_each_sg(sglout, sg, n, i) {
+			int y = i + bufs;
+
+			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+							sg->length,
+							DMA_BIDIRECTIONAL);
+			buflout->bufers[y].len = sg->length;
+			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+				goto err;
+		}
+		buflout->num_bufs = n + bufs;
+		buflout->num_mapped_bufs = n;
+		qat_req->buf.blout = buflout;
+		qat_req->buf.bloutp = bloutp;
+	} else {
+		/* Otherwise set the src and dst to the same address */
+		qat_req->buf.bloutp = qat_req->buf.blp;
+	}
+	return 0;
+err:
+	dev_err(dev, "Failed to map buf for dma\n");
+	for (i = 0; i < n + bufs; i++) {
+		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+			dma_unmap_single(dev, bufl->bufers[i].addr,
+					 bufl->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+		}
+	}
+	if (!dma_mapping_error(dev, blp))
+		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+	kfree(bufl);
+	if (sgl != sglout && buflout) {
+		for_each_sg(sglout, sg, n, i) {
+			int y = i + bufs;
+
+			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
+				dma_unmap_single(dev, buflout->bufers[y].addr,
+						 buflout->bufers[y].len,
+						 DMA_BIDIRECTIONAL);
+		}
+		if (!dma_mapping_error(dev, bloutp))
+			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+		kfree(buflout);
+	}
+	return -ENOMEM;
+}
+
+void qat_alg_callback(void *resp)
+{
+	struct icp_qat_fw_la_resp *qat_resp = resp;
+	struct qat_crypto_request *qat_req =
+				(void *)(__force long)qat_resp->opaque_data;
+	struct qat_alg_session_ctx *ctx = qat_req->ctx;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct aead_request *areq = qat_req->areq;
+	uint8_t stat_field = qat_resp->comn_resp.comn_status;
+	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+	qat_alg_free_bufl(inst, qat_req);
+	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+		res = -EBADMSG;
+	areq->base.complete(&areq->base, res);
+}
+
+static int qat_alg_dec(struct aead_request *areq)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+				  areq->iv, AES_BLOCK_SIZE, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->dec_fw_req_tmpl;
+	qat_req->ctx = ctx;
+	qat_req->areq = areq;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = areq->cryptlen - digst_size;
+	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+	auth_param->auth_off = 0;
+	auth_param->auth_len = areq->assoclen +
+				cipher_param->cipher_length + AES_BLOCK_SIZE;
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
+				int enc_iv)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+				  iv, AES_BLOCK_SIZE, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->enc_fw_req_tmpl;
+	qat_req->ctx = ctx;
+	qat_req->areq = areq;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+	if (enc_iv) {
+		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
+		cipher_param->cipher_offset = areq->assoclen;
+	} else {
+		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+		cipher_param->cipher_length = areq->cryptlen;
+		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+	}
+	auth_param->auth_off = 0;
+	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_enc(struct aead_request *areq)
+{
+	return qat_alg_enc_internal(areq, areq->iv, 0);
+}
+
+static int qat_alg_genivenc(struct aead_givcrypt_request *req)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+	__be64 seq;
+
+	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
+	seq = cpu_to_be64(req->seq);
+	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
+	       &seq, sizeof(uint64_t));
+	return qat_alg_enc_internal(&req->areq, req->giv, 1);
+}
+
+static int qat_alg_init(struct crypto_tfm *tfm,
+			enum icp_qat_hw_auth_algo hash, const char *hash_name)
+{
+	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+	if (IS_ERR(ctx->hash_tfm))
+		return -EFAULT;
+	spin_lock_init(&ctx->lock);
+	ctx->qat_hash_alg = hash;
+	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
+				sizeof(struct qat_crypto_request);
+	ctx->tfm = tfm;
+	return 0;
+}
+
+static int qat_alg_sha1_init(struct crypto_tfm *tfm)
+{
+	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_sha256_init(struct crypto_tfm *tfm)
+{
+	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_sha512_init(struct crypto_tfm *tfm)
+{
+	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_exit(struct crypto_tfm *tfm)
+{
+	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev;
+
+	if (!IS_ERR(ctx->hash_tfm))
+		crypto_free_shash(ctx->hash_tfm);
+
+	if (!inst)
+		return;
+
+	dev = &GET_DEV(inst->accel_dev);
+	if (ctx->enc_cd)
+		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+				  ctx->enc_cd, ctx->enc_cd_paddr);
+	if (ctx->dec_cd)
+		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+				  ctx->dec_cd, ctx->dec_cd_paddr);
+	if (ctx->auth_hw_state_enc)
+		dma_free_coherent(dev, sizeof(struct qat_auth_state),
+				  ctx->auth_hw_state_enc,
+				  ctx->auth_state_enc_paddr);
+
+	if (ctx->auth_hw_state_dec)
+		dma_free_coherent(dev, sizeof(struct qat_auth_state),
+				  ctx->auth_hw_state_dec,
+				  ctx->auth_state_dec_paddr);
+
+	qat_crypto_put_instance(inst);
+}
+
+static struct crypto_alg qat_algs[] = { {
+	.cra_name = "authenc(hmac(sha1),cbc(aes))",
+	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_aead_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_sha1_init,
+	.cra_exit = qat_alg_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = qat_alg_setkey,
+			.decrypt = qat_alg_dec,
+			.encrypt = qat_alg_enc,
+			.givencrypt = qat_alg_genivenc,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+	},
+}, {
+	.cra_name = "authenc(hmac(sha256),cbc(aes))",
+	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_aead_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_sha256_init,
+	.cra_exit = qat_alg_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = qat_alg_setkey,
+			.decrypt = qat_alg_dec,
+			.encrypt = qat_alg_enc,
+			.givencrypt = qat_alg_genivenc,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+	},
+}, {
+	.cra_name = "authenc(hmac(sha512),cbc(aes))",
+	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_aead_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_sha512_init,
+	.cra_exit = qat_alg_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = qat_alg_setkey,
+			.decrypt = qat_alg_dec,
+			.encrypt = qat_alg_enc,
+			.givencrypt = qat_alg_genivenc,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+	},
+} };
+
+int qat_algs_register(void)
+{
+	if (atomic_add_return(1, &active_dev) == 1) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+			qat_algs[i].cra_flags =	CRYPTO_ALG_TYPE_AEAD |
+						CRYPTO_ALG_ASYNC;
+		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	}
+	return 0;
+}
+
+int qat_algs_unregister(void)
+{
+	if (atomic_sub_return(1, &active_dev) == 0)
+		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	return 0;
+}
+
+int qat_algs_init(void)
+{
+	atomic_set(&active_dev, 0);
+	crypto_get_default_rng();
+	return 0;
+}
+
+void qat_algs_exit(void)
+{
+	crypto_put_default_rng();
+}
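
For context, a hedged sketch (not part of the patch) of how a kernel caller could
exercise one of the authenc algorithms registered above through the AEAD API of this
era; the example_* names are hypothetical, the scatterlists are assumed to be set up by
the caller, and the key must already be in the RTA-encoded format that
crypto_authenc_extractkeys() expects:

    #include <crypto/aead.h>
    #include <linux/completion.h>
    #include <linux/scatterlist.h>

    struct example_result {
            struct completion done;
            int err;
    };

    static void example_complete(struct crypto_async_request *areq, int err)
    {
            struct example_result *res = areq->data;

            if (err == -EINPROGRESS)
                    return;
            res->err = err;
            complete(&res->done);
    }

    static int example_encrypt(struct scatterlist *assoc, unsigned int assoclen,
                               struct scatterlist *src, struct scatterlist *dst,
                               u8 *iv, unsigned int cryptlen,
                               const u8 *key, unsigned int keylen)
    {
            struct example_result result;
            struct crypto_aead *tfm;
            struct aead_request *req;
            int ret;

            tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_aead_setkey(tfm, key, keylen); /* ends up in qat_alg_setkey() */
            if (ret)
                    goto out_free_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_free_tfm;
            }
            init_completion(&result.done);
            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      example_complete, &result);
            aead_request_set_assoc(req, assoc, assoclen);
            aead_request_set_crypt(req, src, dst, cryptlen, iv);

            ret = crypto_aead_encrypt(req);     /* typically -EINPROGRESS */
            if (ret == -EINPROGRESS || ret == -EBUSY) {
                    wait_for_completion(&result.done);
                    ret = result.err;
            }
            aead_request_free(req);
    out_free_tfm:
            crypto_free_aead(tfm);
            return ret;
    }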
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
new file mode 100644
index 0000000..0d59bcb
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -0,0 +1,284 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+	if (atomic_sub_return(1, &inst->refctr) == 0)
+		adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+	struct qat_crypto_instance *inst;
+	struct list_head *list_ptr, *tmp;
+	int i;
+
+	list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
+		inst = list_entry(list_ptr, struct qat_crypto_instance, list);
+
+		for (i = atomic_read(&inst->refctr); i > 0; i--)
+			qat_crypto_put_instance(inst);
+
+		if (inst->sym_tx)
+			adf_remove_ring(inst->sym_tx);
+
+		if (inst->sym_rx)
+			adf_remove_ring(inst->sym_rx);
+
+		if (inst->pke_tx)
+			adf_remove_ring(inst->pke_tx);
+
+		if (inst->pke_rx)
+			adf_remove_ring(inst->pke_rx);
+
+		if (inst->rnd_tx)
+			adf_remove_ring(inst->rnd_tx);
+
+		if (inst->rnd_rx)
+			adf_remove_ring(inst->rnd_rx);
+
+		list_del(list_ptr);
+		kfree(inst);
+	}
+	return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+	struct adf_accel_dev *accel_dev = NULL;
+	struct qat_crypto_instance *inst_best = NULL;
+	struct list_head *itr;
+	unsigned long best = ~0;
+
+	list_for_each(itr, adf_devmgr_get_head()) {
+		accel_dev = list_entry(itr, struct adf_accel_dev, list);
+		if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+			break;
+		accel_dev = NULL;
+	}
+	if (!accel_dev) {
+		pr_err("QAT: Could not find device on given node\n");
+		accel_dev = adf_devmgr_get_first();
+	}
+	if (!accel_dev || !adf_dev_started(accel_dev))
+		return NULL;
+
+	list_for_each(itr, &accel_dev->crypto_list) {
+		struct qat_crypto_instance *inst;
+		unsigned long cur;
+
+		inst = list_entry(itr, struct qat_crypto_instance, list);
+		cur = atomic_read(&inst->refctr);
+		if (best > cur) {
+			inst_best = inst;
+			best = cur;
+		}
+	}
+	if (inst_best) {
+		if (atomic_add_return(1, &inst_best->refctr) == 1) {
+			if (adf_dev_get(accel_dev)) {
+				atomic_dec(&inst_best->refctr);
+				pr_err("QAT: Could not increment dev refctr\n");
+				return NULL;
+			}
+		}
+	}
+	return inst_best;
+}
+
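+/*
+ * Create the crypto instances described in the device configuration: each
+ * instance gets a sym, asym (pke) and rnd TX/RX ring pair on its bank.
+ */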
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+	int i;
+	unsigned long bank;
+	unsigned long num_inst, num_msg_sym, num_msg_asym;
+	int msg_size;
+	struct qat_crypto_instance *inst;
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	strlcpy(key, ADF_NUM_CY, sizeof(key));
+
+	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+		return -EFAULT;
+
+	if (kstrtoul(val, 0, &num_inst))
+		return -EFAULT;
+
+	for (i = 0; i < num_inst; i++) {
+		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+				    accel_dev->numa_node);
+		if (!inst)
+			goto err;
+
+		list_add_tail(&inst->list, &accel_dev->crypto_list);
+		inst->id = i;
+		atomic_set(&inst->refctr, 0);
+		inst->accel_dev = accel_dev;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &bank))
+			goto err;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &num_msg_sym))
+			goto err;
+		num_msg_sym = num_msg_sym >> 1;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &num_msg_asym))
+			goto err;
+		num_msg_asym = num_msg_asym >> 1;
+
+		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+				    msg_size, key, NULL, 0, &inst->sym_tx))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, NULL, 0, &inst->rnd_tx))
+			goto err;
+
+		msg_size = msg_size >> 1;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, NULL, 0, &inst->pke_tx))
+			goto err;
+
+		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+				    msg_size, key, qat_alg_callback, 0,
+				    &inst->sym_rx))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, qat_alg_callback, 0,
+				    &inst->rnd_rx))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, qat_alg_callback, 0,
+				    &inst->pke_rx))
+			goto err;
+	}
+	return 0;
+err:
+	qat_crypto_free_instances(accel_dev);
+	return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+	if (qat_crypto_create_instances(accel_dev))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+	return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+				    enum adf_event event)
+{
+	int ret;
+
+	switch (event) {
+	case ADF_EVENT_INIT:
+		ret = qat_crypto_init(accel_dev);
+		break;
+	case ADF_EVENT_SHUTDOWN:
+		ret = qat_crypto_shutdown(accel_dev);
+		break;
+	case ADF_EVENT_RESTARTING:
+	case ADF_EVENT_RESTARTED:
+	case ADF_EVENT_START:
+	case ADF_EVENT_STOP:
+	default:
+		ret = 0;
+	}
+	return ret;
+}
+
+int qat_crypto_register(void)
+{
+	memset(&qat_crypto, 0, sizeof(qat_crypto));
+	qat_crypto.event_hld = qat_crypto_event_handler;
+	qat_crypto.name = "qat_crypto";
+	return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+	return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
new file mode 100644
index 0000000..ab8468d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+
+struct qat_crypto_instance {
+	struct adf_etr_ring_data *sym_tx;
+	struct adf_etr_ring_data *sym_rx;
+	struct adf_etr_ring_data *pke_tx;
+	struct adf_etr_ring_data *pke_rx;
+	struct adf_etr_ring_data *rnd_tx;
+	struct adf_etr_ring_data *rnd_rx;
+	struct adf_accel_dev *accel_dev;
+	struct list_head list;
+	unsigned long state;
+	int id;
+	atomic_t refctr;
+};
+
+struct qat_crypto_request_buffs {
+	struct qat_alg_buf_list *bl;
+	dma_addr_t blp;
+	struct qat_alg_buf_list *blout;
+	dma_addr_t bloutp;
+	size_t sz;
+};
+
+struct qat_crypto_request {
+	struct icp_qat_fw_la_bulk_req req;
+	struct qat_alg_session_ctx *ctx;
+	struct aead_request *areq;
+	struct qat_crypto_request_buffs buf;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
new file mode 100644
index 0000000..9b8a315
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -0,0 +1,1393 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR               0xffff
+#define MAX_RETRY_TIMES           10000
+#define INIT_CTX_ARB_VALUE        0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE             0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB           20
+#define RST_CSR_AE_LSB            0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
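+/* Keep the write-1-to-clear status bits out of read-modify-write updates */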
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+	(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+	(~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+	(inst = ((inst & 0xFFFF00C03FFull) | \
+		((((const_val) << 12) & 0x0FF00000ull) | \
+		(((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+	(inst = ((inst & 0xFFFF00FFF00ull) | \
+		((((const_val) << 12) & 0x0FF00000ull) | \
+		(((const_val) <<  0) & 0x000000FFull))))
+
+#define AE(handle, ae) handle->hal_handle->aes[ae]
+
+static const uint64_t inst_4b[] = {
+	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0A021000000ull
+};
+
+static const uint64_t inst[] = {
+	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+			  unsigned char ae, unsigned int ctx_mask)
+{
+	AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
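+/*
+ * AE CSR accesses go through the local CSR bus; poll LOCAL_CSR_STATUS until
+ * the access is seen to complete or the retry budget runs out.
+ */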
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned int csr,
+			     unsigned int *value)
+{
+	unsigned int iterations = CSR_RETRY_TIMES;
+
+	do {
+		*value = GET_AE_CSR(handle, ae, csr);
+		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+			return 0;
+	} while (iterations--);
+
+	pr_err("QAT: Read CSR timeout\n");
+	return -EFAULT;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned int csr,
+			     unsigned int value)
+{
+	unsigned int iterations = CSR_RETRY_TIMES;
+
+	do {
+		SET_AE_CSR(handle, ae, csr, value);
+		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+			return 0;
+	} while (iterations--);
+
+	pr_err("QAT: Write CSR timeout\n");
+	return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+				     unsigned char ae, unsigned char ctx,
+				     unsigned int *events)
+{
+	unsigned int cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int cycles,
+			       int chk_inactive)
+{
+	unsigned int base_cnt = 0, cur_cnt = 0;
+	unsigned int csr = (1 << ACS_ABO_BITPOS);
+	int times = MAX_RETRY_TIMES;
+	int elapsed_cycles = 0;
+
+	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+	base_cnt &= 0xffff;
+	while ((int)cycles > elapsed_cycles && times--) {
+		if (chk_inactive)
+			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+
+		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+		cur_cnt &= 0xffff;
+		elapsed_cycles = cur_cnt - base_cnt;
+
+		if (elapsed_cycles < 0)
+			elapsed_cycles += 0x10000;
+
+		/* ensure at least 8 cycles have elapsed in wait_cycles */
+		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+			return 0;
+	}
+	if (!times) {
+		pr_err("QAT: wait_num_cycles timed out\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
+#define SET_BIT(wrd, bit) ((wrd) | (1 << (bit)))
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+			    unsigned char ae, unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	if ((mode != 4) && (mode != 8)) {
+		pr_err("QAT: bad ctx mode=%d\n", mode);
+		return -EINVAL;
+	}
+
+	/* Sets the acceleration engine context mode to either four or eight */
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+	csr = IGNORE_W1C_MASK & csr;
+	new_csr = (mode == 4) ?
+		SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+		CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+	return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+	csr &= IGNORE_W1C_MASK;
+
+	new_csr = (mode) ?
+		SET_BIT(csr, CE_NN_MODE_BITPOS) :
+		CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+	if (new_csr != csr)
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+	return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
+			   unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+	csr &= IGNORE_W1C_MASK;
+	switch (lm_type) {
+	case ICP_LMEM0:
+		new_csr = (mode) ?
+			SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+			CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+		break;
+	case ICP_LMEM1:
+		new_csr = (mode) ?
+			SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+			CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+		break;
+	default:
+		pr_err("QAT: invalid lmType = 0x%x\n", lm_type);
+		return -EINVAL;
+	}
+
+	if (new_csr != csr)
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+	return 0;
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+					   unsigned short reg_num)
+{
+	unsigned short reg_addr;
+
+	switch (type) {
+	case ICP_GPA_ABS:
+	case ICP_GPB_ABS:
+		reg_addr = 0x80 | (reg_num & 0x7f);
+		break;
+	case ICP_GPA_REL:
+	case ICP_GPB_REL:
+		reg_addr = reg_num & 0x1f;
+		break;
+	case ICP_SR_RD_REL:
+	case ICP_SR_WR_REL:
+	case ICP_SR_REL:
+		reg_addr = 0x180 | (reg_num & 0x1f);
+		break;
+	case ICP_SR_ABS:
+		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+		break;
+	case ICP_DR_RD_REL:
+	case ICP_DR_WR_REL:
+	case ICP_DR_REL:
+		reg_addr = 0x1c0 | (reg_num & 0x1f);
+		break;
+	case ICP_DR_ABS:
+		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+		break;
+	case ICP_NEIGH_REL:
+		reg_addr = 0x280 | (reg_num & 0x1f);
+		break;
+	case ICP_LMEM0:
+		reg_addr = 0x200;
+		break;
+	case ICP_LMEM1:
+		reg_addr = 0x220;
+		break;
+	case ICP_NO_DEST:
+		reg_addr = 0x300 | (reg_num & 0xff);
+		break;
+	default:
+		reg_addr = BAD_REGADDR;
+		break;
+	}
+	return reg_addr;
+}
+
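+/* Put the enabled AEs and accel slices into reset via the ICP_RESET CSR */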
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int ae_reset_csr;
+
+	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+	ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
+	ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
+	SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+}
+
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned int ctx_mask,
+				unsigned int ae_csr, unsigned int csr_val)
+{
+	unsigned int ctx, cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+	}
+
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned char ctx,
+				unsigned int ae_csr, unsigned int *csr_val)
+{
+	unsigned int cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+				  unsigned char ae, unsigned int ctx_mask,
+				  unsigned int events)
+{
+	unsigned int ctx, cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+	}
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+				     unsigned char ae, unsigned int ctx_mask,
+				     unsigned int events)
+{
+	unsigned int ctx, cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+				  events);
+	}
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int base_cnt, cur_cnt;
+	unsigned char ae;
+	unsigned int times = MAX_RETRY_TIMES;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!(handle->hal_handle->ae_mask & (1 << ae)))
+			continue;
+
+		times = MAX_RETRY_TIMES;
+		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+		base_cnt &= 0xffff;
+
+		do {
+			qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+			cur_cnt &= 0xffff;
+		} while (times-- && (cur_cnt == base_cnt));
+
+		if (!times) {
+			pr_err("QAT: AE%d is inactive!\n", ae);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int misc_ctl;
+	unsigned char ae;
+
+	/* stop the timestamp timers */
+	misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
+	if (misc_ctl & MC_TIMESTAMP_ENABLE)
+		SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
+			    (~MC_TIMESTAMP_ENABLE));
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!(handle->hal_handle->ae_mask & (1 << ae)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+	}
+	/* start timestamp timers */
+	SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT (1 << 2)
+#define ESRAM_AUTO_TINIT_DONE (1 << 3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+	void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
+				 ESRAM_AUTO_INIT_CSR_OFFSET;
+	unsigned int csr_val, times = 30;
+
+	csr_val = ADF_CSR_RD(csr_addr, 0);
+	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+		return 0;
+
+	csr_val = ADF_CSR_RD(csr_addr, 0);
+	csr_val |= ESRAM_AUTO_TINIT;
+	ADF_CSR_WR(csr_addr, 0, csr_val);
+
+	do {
+		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+		csr_val = ADF_CSR_RD(csr_addr, 0);
+	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+	if (!times) {
+		pr_err("QAT: failed to init eSRAM\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int ae_reset_csr;
+	unsigned char ae;
+	unsigned int clk_csr;
+	unsigned int times = 100;
+	unsigned int csr;
+
+	/* write to the reset csr */
+	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+	ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
+	ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
+	do {
+		SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+		if (!(times--))
+			goto out_err;
+		csr = GET_GLB_CSR(handle, ICP_RESET);
+	} while ((handle->hal_handle->ae_mask |
+		 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
+	/* enable clock */
+	clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
+	clk_csr |= handle->hal_handle->ae_mask << 0;
+	clk_csr |= handle->hal_handle->slice_mask << 20;
+	SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
+	if (qat_hal_check_ae_alive(handle))
+		goto out_err;
+
+	/* Set undefined power-up/reset states to reasonable default values */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!(handle->hal_handle->ae_mask & (1 << ae)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+				  INIT_CTX_ENABLE_VALUE);
+		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+				    CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+		qat_hal_put_wakeup_event(handle, ae,
+					 ICP_QAT_UCLO_AE_ALL_CTX,
+					 INIT_WAKEUP_EVENTS_VALUE);
+		qat_hal_put_sig_event(handle, ae,
+				      ICP_QAT_UCLO_AE_ALL_CTX,
+				      INIT_SIG_EVENTS_VALUE);
+	}
+	if (qat_hal_init_esram(handle))
+		goto out_err;
+	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+		goto out_err;
+	qat_hal_reset_timestamp(handle);
+
+	return 0;
+out_err:
+	pr_err("QAT: failed to get device out of reset\n");
+	return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned int ctx_mask)
+{
+	unsigned int ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+	ctx &= IGNORE_W1C_MASK &
+		(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
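+/* Fold a 64-bit word down to its single-bit parity */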
+static uint64_t qat_hal_parity_64bit(uint64_t word)
+{
+	word ^= word >> 1;
+	word ^= word >> 2;
+	word ^= word >> 4;
+	word ^= word >> 8;
+	word ^= word >> 16;
+	word ^= word >> 32;
+	return word & 1;
+}
+
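+/*
+ * Recompute the seven ECC bits (bits 44-50) of a microword as parities
+ * over fixed bit masks of the instruction bits.
+ */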
+static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+{
+	uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+		bit6_mask = 0xdaf69a46910ULL;
+
+	/* clear the ecc bits */
+	uword &= ~(0x7fULL << 0x2C);
+	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+	return uword;
+}
+
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+		       unsigned char ae, unsigned int uaddr,
+		       unsigned int words_num, uint64_t *uword)
+{
+	unsigned int ustore_addr;
+	unsigned int i;
+
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr |= UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < words_num; i++) {
+		unsigned int uwrd_lo, uwrd_hi;
+		uint64_t tmp;
+
+		tmp = qat_hal_set_uword_ecc(uword[i]);
+		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+		uwrd_hi = (unsigned int)(tmp >> 0x20);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	}
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int ctx_mask)
+{
+	unsigned int ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+	ctx &= IGNORE_W1C_MASK;
+	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae;
+	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+	int times = MAX_RETRY_TIMES;
+	unsigned int csr_val = 0;
+	unsigned short reg;
+	unsigned int savctx = 0;
+	int ret = 0;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!(handle->hal_handle->ae_mask & (1 << ae)))
+			continue;
+		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+					     reg, 0);
+			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+					     reg, 0);
+		}
+		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+		csr_val &= IGNORE_W1C_MASK;
+		csr_val |= CE_NN_MODE;
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+				  (uint64_t *)inst);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+				    CTX_SIG_EVENTS_INDIRECT, 0);
+		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+		qat_hal_enable_ctx(handle, ae, ctx_mask);
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!(handle->hal_handle->ae_mask & (1 << ae)))
+			continue;
+		/* wait for AE to finish */
+		do {
+			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+		} while (ret && times--);
+
+		if (!times) {
+			pr_err("QAT: clear GPR of AE %d failed\n", ae);
+			return -EINVAL;
+		}
+		qat_hal_disable_ctx(handle, ae, ctx_mask);
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  savctx & ACS_ACNO);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+				  INIT_CTX_ENABLE_VALUE);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+					 INIT_WAKEUP_EVENTS_VALUE);
+		qat_hal_put_sig_event(handle, ae, ctx_mask,
+				      INIT_SIG_EVENTS_VALUE);
+	}
+	return 0;
+}
+
+#define ICP_DH895XCC_AE_OFFSET      0x20000
+#define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET    0x800
+#define ICP_DH895XCC_EP_OFFSET      0x3a000
+#define ICP_DH895XCC_PMISC_BAR 1
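+/*
+ * Map the PMISC BAR sub-regions used by the HAL, build the per-AE
+ * bookkeeping from the hardware AE mask, take the AEs out of reset and
+ * scrub their GPRs.
+ */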
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+	unsigned char ae;
+	unsigned int max_en_ae_id = 0;
+	struct icp_qat_fw_loader_handle *handle;
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *bar = &pci_info->pci_bars[ICP_DH895XCC_PMISC_BAR];
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+						ICP_DH895XCC_CAP_OFFSET;
+	handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+						ICP_DH895XCC_AE_OFFSET;
+	handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+	handle->hal_cap_ae_local_csr_addr_v =
+		handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
+
+	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+	if (!handle->hal_handle)
+		goto out_hal_handle;
+	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+	handle->hal_handle->ae_mask = hw_data->ae_mask;
+	handle->hal_handle->slice_mask = hw_data->accel_mask;
+	/* create AE objects */
+	handle->hal_handle->upc_mask = 0x1ffff;
+	handle->hal_handle->max_ustore = 0x4000;
+	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
+		if (!(hw_data->ae_mask & (1 << ae)))
+			continue;
+		handle->hal_handle->aes[ae].free_addr = 0;
+		handle->hal_handle->aes[ae].free_size =
+		    handle->hal_handle->max_ustore;
+		handle->hal_handle->aes[ae].ustore_size =
+		    handle->hal_handle->max_ustore;
+		handle->hal_handle->aes[ae].live_ctx_mask =
+						ICP_QAT_UCLO_AE_ALL_CTX;
+		max_en_ae_id = ae;
+	}
+	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+	/* take all AEs out of reset */
+	if (qat_hal_clr_reset(handle)) {
+		pr_err("QAT: qat_hal_clr_reset error\n");
+		goto out_err;
+	}
+	if (qat_hal_clear_gpr(handle))
+		goto out_err;
+	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		unsigned int csr_val = 0;
+
+		if (!(hw_data->ae_mask & (1 << ae)))
+			continue;
+		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+		csr_val |= 0x1;
+		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+	}
+	accel_dev->fw_loader->fw_loader = handle;
+	return 0;
+
+out_err:
+	kfree(handle->hal_handle);
+out_hal_handle:
+	kfree(handle);
+	return -EFAULT;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+	if (!handle)
+		return;
+	kfree(handle->hal_handle);
+	kfree(handle);
+}
+
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		   unsigned int ctx_mask)
+{
+	qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+				 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
+	qat_hal_enable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		  unsigned int ctx_mask)
+{
+	qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+	qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+			    handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int uaddr,
+			       unsigned int words_num, uint64_t *uword)
+{
+	unsigned int i, uwrd_lo, uwrd_hi;
+	unsigned int ustore_addr, misc_control;
+
+	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+			  misc_control & 0xfffffffb);
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr |= UA_ECS;
+	for (i = 0; i < words_num; i++) {
+		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+		uaddr++;
+		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
+		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+		uword[i] = uwrd_hi;
+		uword[i] = (uword[i] << 0x20) | uwrd_lo;
+	}
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned int uaddr,
+		     unsigned int words_num, unsigned int *data)
+{
+	unsigned int i, ustore_addr;
+
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr |= UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < words_num; i++) {
+		unsigned int uwrd_lo, uwrd_hi, tmp;
+
+		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+			  ((data[i] & 0xff00) << 2) |
+			  (0x3 << 8) | (data[i] & 0xff);
+		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+		uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+		tmp = ((data[i] >> 0x10) & 0xffff);
+		uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	}
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
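+/*
+ * Temporarily write the given micro instructions into ustore address 0,
+ * run them on one context, then restore all of the saved AE/context state.
+ */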
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   uint64_t *micro_inst, unsigned int inst_num,
+				   int code_off, unsigned int max_cycle,
+				   unsigned int *endpc)
+{
+	uint64_t savuwords[MAX_EXEC_INST];
+	unsigned int ind_lm_addr0, ind_lm_addr1;
+	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
+	unsigned int ind_cnt_sig;
+	unsigned int ind_sig, act_sig;
+	unsigned int csr_val = 0, newcsr_val;
+	unsigned int savctx;
+	unsigned int savcc, wakeup_events, savpc;
+	unsigned int ctxarb_ctl, ctx_enables;
+
+	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+		pr_err("QAT: invalid instruction num %d\n", inst_num);
+		return -EINVAL;
+	}
+	/* save current context */
+	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
+	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
+	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+			    &ind_lm_addr_byte0);
+	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+			    &ind_lm_addr_byte1);
+	if (inst_num <= MAX_EXEC_INST)
+		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+	savpc &= handle->hal_handle->upc_mask;
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables &= IGNORE_W1C_MASK;
+	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
+	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
+	qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+			    &ind_cnt_sig);
+	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
+	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+	/* execute micro codes */
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+	if (code_off)
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+	qat_hal_enable_ctx(handle, ae, (1 << ctx));
+	/* wait for micro codes to finish */
+	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+		return -EFAULT;
+	if (endpc) {
+		unsigned int ctx_status;
+
+		qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
+				    &ctx_status);
+		*endpc = ctx_status & handle->hal_handle->upc_mask;
+	}
+	/* restore the saved context */
+	qat_hal_disable_ctx(handle, ae, (1 << ctx));
+	if (inst_num <= MAX_EXEC_INST)
+		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+			    handle->hal_handle->upc_mask & savpc);
+	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
+	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+	return 0;
+}
+
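+/*
+ * Read a context-relative register by executing a single move through the
+ * ALU and sampling the ALU_OUT CSR.
+ */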
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      enum icp_qat_uof_regtype reg_type,
+			      unsigned short reg_num, unsigned int *data)
+{
+	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+	unsigned short reg_addr;
+	int status = 0;
+	uint64_t insts, savuword;
+
+	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (reg_addr == BAD_REGADDR) {
+		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+		return -EINVAL;
+	}
+	switch (reg_type) {
+	case ICP_GPA_REL:
+		insts = 0xA070000000ull | (reg_addr & 0x3ff);
+		break;
+	default:
+		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+		break;
+	}
+	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables &= IGNORE_W1C_MASK;
+	if (ctx != (savctx & ACS_ACNO))
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  ctx & ACS_ACNO);
+	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr = UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	insts = qat_hal_set_uword_ecc(insts);
+	uwrd_lo = (unsigned int)(insts & 0xffffffff);
+	uwrd_hi = (unsigned int)(insts >> 0x20);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	/* delay for at least 8 cycles */
+	qat_hal_wait_cycles(handle, ae, 0x8, 0);
+	/*
+	 * read ALU output
+	 * the instruction should have been executed
+	 * prior to clearing the ECS in qat_hal_wr_uwords()
+	 */
+	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+	if (ctx != (savctx & ACS_ACNO))
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  savctx & ACS_ACNO);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+	return status;
+}
+
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      enum icp_qat_uof_regtype reg_type,
+			      unsigned short reg_num, unsigned int data)
+{
+	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+	uint64_t insts[] = {
+		0x0F440000000ull,
+		0x0F040000000ull,
+		0x0F0000C0300ull,
+		0x0E000010000ull
+	};
+	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+	const int imm_w1 = 0, imm_w0 = 1;
+
+	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (dest_addr == BAD_REGADDR) {
+		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+		return -EINVAL;
+	}
+
+	data16lo = 0xffff & data;
+	data16hi = 0xffff & (data >> 0x10);
+	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+					  (0xff & data16hi));
+	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+					   (0xff & data16lo));
+	switch (reg_type) {
+	case ICP_GPA_REL:
+		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+		break;
+	default:
+		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+		break;
+	}
+
+	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+				       code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+	return ARRAY_SIZE(inst_4b);
+}
+
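+/*
+ * Append the inst_4b write template and patch the target address and the
+ * 32-bit value into its immediate fields; returns the instructions added.
+ */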
+static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+				     unsigned int inst_num, unsigned int size,
+				     unsigned int addr, unsigned int *value)
+{
+	int i, val_indx;
+	unsigned int cur_value;
+	const uint64_t *inst_arr;
+	int fixup_offset;
+	int usize = 0;
+	int orig_num;
+
+	orig_num = inst_num;
+	val_indx = 0;
+	cur_value = value[val_indx++];
+	inst_arr = inst_4b;
+	usize = ARRAY_SIZE(inst_4b);
+	fixup_offset = inst_num;
+	for (i = 0; i < usize; i++)
+		micro_inst[inst_num++] = inst_arr[i];
+	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+	fixup_offset++;
+	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+	fixup_offset++;
+	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+	fixup_offset++;
+	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+	return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+				      unsigned char ae, unsigned char ctx,
+				      int *pfirst_exec, uint64_t *micro_inst,
+				      unsigned int inst_num)
+{
+	int stat = 0;
+	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+	unsigned int gprb0 = 0, gprb1 = 0;
+
+	if (*pfirst_exec) {
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+		*pfirst_exec = 0;
+	}
+	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+				       inst_num * 0x5, NULL);
+	if (stat != 0)
+		return -EFAULT;
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+	return 0;
+}
+
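+/*
+ * Concatenate the batched local-memory init writes into one microprogram,
+ * terminate it with the end microword used elsewhere in this file, and run
+ * it once on context 0.
+ */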
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+			unsigned char ae,
+			struct icp_qat_uof_batch_init *lm_init_header)
+{
+	struct icp_qat_uof_batch_init *plm_init;
+	uint64_t *micro_inst_arry;
+	int micro_inst_num;
+	int alloc_inst_size;
+	int first_exec = 1;
+	int stat = 0;
+
+	plm_init = lm_init_header->next;
+	alloc_inst_size = lm_init_header->size;
+	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+		alloc_inst_size = handle->hal_handle->max_ustore;
+	micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!micro_inst_arry)
+		return -ENOMEM;
+	micro_inst_num = 0;
+	while (plm_init) {
+		unsigned int addr, *value, size;
+
+		ae = plm_init->ae;
+		addr = plm_init->addr;
+		value = plm_init->value;
+		size = plm_init->size;
+		micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+							    micro_inst_num,
+							    size, addr, value);
+		plm_init = plm_init->next;
+	}
+	/* exec micro codes */
+	if (micro_inst_arry && (micro_inst_num > 0)) {
+		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+		stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+						  micro_inst_arry,
+						  micro_inst_num);
+	}
+	kfree(micro_inst_arry);
+	return stat;
+}
+
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   enum icp_qat_uof_regtype reg_type,
+				   unsigned short reg_num, unsigned int val)
+{
+	int status = 0;
+	unsigned int reg_addr;
+	unsigned int ctx_enables;
+	unsigned short mask;
+	unsigned short dr_offset = 0x10;
+
+	status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	if (CE_INUSE_CONTEXTS & ctx_enables) {
+		if (ctx & 0x1) {
+			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+			return -EINVAL;
+		}
+		mask = 0x1f;
+		dr_offset = 0x20;
+	} else {
+		mask = 0x0f;
+	}
+	if (reg_num & ~mask)
+		return -EINVAL;
+	reg_addr = reg_num + (ctx << 0x5);
+	switch (reg_type) {
+	case ICP_SR_RD_REL:
+	case ICP_SR_REL:
+		SET_AE_XFER(handle, ae, reg_addr, val);
+		break;
+	case ICP_DR_RD_REL:
+	case ICP_DR_REL:
+		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+		break;
+	default:
+		status = -EINVAL;
+		break;
+	}
+	return status;
+}
+
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   enum icp_qat_uof_regtype reg_type,
+				   unsigned short reg_num, unsigned int data)
+{
+	unsigned int gprval, ctx_enables;
+	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+	    data16low;
+	unsigned short reg_mask;
+	int status = 0;
+	uint64_t micro_inst[] = {
+		0x0F440000000ull,
+		0x0F040000000ull,
+		0x0A000000000ull,
+		0x0F0000C0300ull,
+		0x0E000010000ull
+	};
+	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+	const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	if (CE_INUSE_CONTEXTS & ctx_enables) {
+		if (ctx & 0x1) {
+			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+			return -EINVAL;
+		}
+		reg_mask = (unsigned short)~0x1f;
+	} else {
+		reg_mask = (unsigned short)~0xf;
+	}
+	if (reg_num & reg_mask)
+		return -EINVAL;
+	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (xfr_addr == BAD_REGADDR) {
+		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+		return -EINVAL;
+	}
+	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+	data16low = 0xffff & data;
+	data16hi = 0xffff & (data >> 0x10);
+	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+					  (unsigned short)(0xff & data16hi));
+	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+					   (unsigned short)(0xff & data16low));
+	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+	micro_inst[0x2] = micro_inst[0x2] |
+	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+					 code_off, dly, NULL);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+	return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      unsigned short nn, unsigned int val)
+{
+	unsigned int ctx_enables;
+	int stat = 0;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables &= IGNORE_W1C_MASK;
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	return stat;
+}
+
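+/*
+ * An absolute register number encodes the owning context in its upper
+ * bits; split it into a context-relative number plus a context id.
+ */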
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+				      *handle, unsigned char ae,
+				      unsigned short absreg_num,
+				      unsigned short *relreg,
+				      unsigned char *ctx)
+{
+	unsigned int ctx_enables;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	if (ctx_enables & CE_INUSE_CONTEXTS) {
+		/* 4-ctx mode */
+		*relreg = absreg_num & 0x1F;
+		*ctx = (absreg_num >> 0x4) & 0x6;
+	} else {
+		/* 8-ctx mode */
+		*relreg = absreg_num & 0x0F;
+		*ctx = (absreg_num >> 0x4) & 0x7;
+	}
+	return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned char ctx_mask,
+		     enum icp_qat_uof_regtype reg_type,
+		     unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 1;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+		if (stat) {
+			pr_err("QAT: write gpr failed\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 3;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+					       regdata);
+		if (stat) {
+			pr_err("QAT: write wr xfer failed\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 3;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+					       regdata);
+		if (stat) {
+			pr_err("QAT: write rd xfer failed\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned char ctx_mask,
+		    unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned char ctx;
+
+	if (ctx_mask == 0)
+		return -EINVAL;
+
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+			continue;
+		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+		if (stat) {
+			pr_err("QAT: write neigh failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
new file mode 100644
index 0000000..1e27f9f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -0,0 +1,1181 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+				 unsigned int ae, unsigned int image_num)
+{
+	struct icp_qat_uclo_aedata *ae_data;
+	struct icp_qat_uclo_encapme *encap_image;
+	struct icp_qat_uclo_page *page = NULL;
+	struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+	ae_data = &obj_handle->ae_data[ae];
+	encap_image = &obj_handle->ae_uimage[image_num];
+	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+	ae_slice->encap_image = encap_image;
+
+	if (encap_image->img_ptr) {
+		ae_slice->ctx_mask_assigned =
+					encap_image->img_ptr->ctx_assigned;
+		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+	} else {
+		ae_slice->ctx_mask_assigned = 0;
+	}
+	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+	if (!ae_slice->region)
+		return -ENOMEM;
+	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+	if (!ae_slice->page)
+		goto out_err;
+	page = ae_slice->page;
+	page->encap_page = encap_image->page;
+	ae_slice->page->region = ae_slice->region;
+	ae_data->slice_num++;
+	return 0;
+out_err:
+	kfree(ae_slice->region);
+	ae_slice->region = NULL;
+	return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+	unsigned int i;
+
+	if (!ae_data) {
+		pr_err("QAT: bad argument, ae_data is NULL\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ae_data->slice_num; i++) {
+		kfree(ae_data->ae_slices[i].region);
+		ae_data->ae_slices[i].region = NULL;
+		kfree(ae_data->ae_slices[i].page);
+		ae_data->ae_slices[i].page = NULL;
+	}
+	return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+				 unsigned int str_offset)
+{
+	if ((!str_table->table_len) || (str_offset > str_table->table_len))
+		return NULL;
+	return (char *)(((unsigned long)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
+{
+	int maj = hdr->maj_ver & 0xff;
+	int min = hdr->min_ver & 0xff;
+
+	if (hdr->file_id != ICP_QAT_UOF_FID) {
+		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+		return -EINVAL;
+	}
+	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+		       maj, min);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+				      unsigned int addr, unsigned int *val,
+				      unsigned int num_in_bytes)
+{
+	unsigned int outval;
+	unsigned char *ptr = (unsigned char *)val;
+
+	while (num_in_bytes) {
+		memcpy(&outval, ptr, 4);
+		SRAM_WRITE(handle, addr, outval);
+		num_in_bytes -= 4;
+		ptr += 4;
+		addr += 4;
+	}
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+				      unsigned char ae, unsigned int addr,
+				      unsigned int *val,
+				      unsigned int num_in_bytes)
+{
+	unsigned int outval;
+	unsigned char *ptr = (unsigned char *)val;
+
+	addr >>= 0x2; /* convert to uword address */
+
+	while (num_in_bytes) {
+		memcpy(&outval, ptr, 4);
+		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+		num_in_bytes -= 4;
+		ptr += 4;
+	}
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae,
+				   struct icp_qat_uof_batch_init
+				   *umem_init_header)
+{
+	struct icp_qat_uof_batch_init *umem_init;
+
+	if (!umem_init_header)
+		return;
+	umem_init = umem_init_header->next;
+	while (umem_init) {
+		unsigned int addr, *value, size;
+
+		ae = umem_init->ae;
+		addr = umem_init->addr;
+		value = umem_init->value;
+		size = umem_init->size;
+		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+		umem_init = umem_init->next;
+	}
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+				 struct icp_qat_uof_batch_init **base)
+{
+	struct icp_qat_uof_batch_init *umem_init;
+
+	umem_init = *base;
+	while (umem_init) {
+		struct icp_qat_uof_batch_init *pre;
+
+		pre = umem_init;
+		umem_init = umem_init->next;
+		kfree(pre);
+	}
+	*base = NULL;
+}
+
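+/*
+ * Parse the leading decimal digits of @str (e.g. the AE number encoded in
+ * a UOF symbol name) into @num; the first non-digit terminates the number.
+ */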
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+	char buf[16] = {0};
+	unsigned long ae = 0;
+	int i;
+
+	strncpy(buf, str, 15);
+	for (i = 0; i < 16; i++) {
+		if (!isdigit(buf[i])) {
+			buf[i] = '\0';
+			break;
+		}
+	}
+	if (kstrtoul(buf, 10, &ae))
+		return -EFAULT;
+
+	*num = (unsigned int)ae;
+	return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+				     struct icp_qat_uof_initmem *init_mem,
+				     unsigned int size_range, unsigned int *ae)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	char *str;
+
+	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+		pr_err("QAT: initmem is out of range\n");
+		return -EINVAL;
+	}
+	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+		pr_err("QAT: Memory scope for init_mem error\n");
+		return -EINVAL;
+	}
+	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+	if (!str) {
+		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+		return -EINVAL;
+	}
+	if (qat_uclo_parse_num(str, ae)) {
+		pr_err("QAT: Parse num for AE number failed\n");
+		return -EINVAL;
+	}
+	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+		pr_err("QAT: ae %u out of range\n", *ae);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+					   *handle, struct icp_qat_uof_initmem
+					   *init_mem, unsigned int ae,
+					   struct icp_qat_uof_batch_init
+					   **init_tab_base)
+{
+	struct icp_qat_uof_batch_init *init_header, *tail;
+	struct icp_qat_uof_batch_init *mem_init, *tail_old;
+	struct icp_qat_uof_memvar_attr *mem_val_attr;
+	unsigned int i, flag = 0;
+
+	mem_val_attr =
+		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+		sizeof(struct icp_qat_uof_initmem));
+
+	init_header = *init_tab_base;
+	if (!init_header) {
+		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+		if (!init_header)
+			return -ENOMEM;
+		init_header->size = 1;
+		*init_tab_base = init_header;
+		flag = 1;
+	}
+	tail_old = init_header;
+	while (tail_old->next)
+		tail_old = tail_old->next;
+	tail = tail_old;
+	for (i = 0; i < init_mem->val_attr_num; i++) {
+		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+		if (!mem_init)
+			goto out_err;
+		mem_init->ae = ae;
+		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+		mem_init->value = &mem_val_attr->value;
+		mem_init->size = 4;
+		mem_init->next = NULL;
+		tail->next = mem_init;
+		tail = mem_init;
+		init_header->size += qat_hal_get_ins_num();
+		mem_val_attr++;
+	}
+	return 0;
+out_err:
+	while (tail_old) {
+		mem_init = tail_old->next;
+		kfree(tail_old);
+		tail_old = mem_init;
+	}
+	if (flag)
+		kfree(*init_tab_base);
+	return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+				  struct icp_qat_uof_initmem *init_mem)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae;
+
+	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
+		return -EINVAL;
+	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+					    &obj_handle->lm_init_tab[ae]))
+		return -EINVAL;
+	return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+				  struct icp_qat_uof_initmem *init_mem)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae, ustore_size, uaddr, i;
+
+	ustore_size = obj_handle->ustore_phy_size;
+	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+		return -EINVAL;
+	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+					    &obj_handle->umem_init_tab[ae]))
+		return -EINVAL;
+	/* set the highest ustore address referenced */
+	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
+		if (obj_handle->ae_data[ae].ae_slices[i].
+		    encap_image->uwords_num < uaddr)
+			obj_handle->ae_data[ae].ae_slices[i].
+			encap_image->uwords_num = uaddr;
+	}
+	return 0;
+}
+
+#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+				   struct icp_qat_uof_initmem *init_mem)
+{
+	unsigned int i;
+	struct icp_qat_uof_memvar_attr *mem_val_attr;
+
+	mem_val_attr =
+		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+		sizeof(struct icp_qat_uof_initmem));
+
+	switch (init_mem->region) {
+	case ICP_QAT_UOF_SRAM_REGION:
+		if ((init_mem->addr + init_mem->num_in_bytes) >
+		    ICP_DH895XCC_PESRAM_BAR_SIZE) {
+			pr_err("QAT: initmem on SRAM is out of range\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < init_mem->val_attr_num; i++) {
+			qat_uclo_wr_sram_by_words(handle,
+						  init_mem->addr +
+						  mem_val_attr->offset_in_byte,
+						  &mem_val_attr->value, 4);
+			mem_val_attr++;
+		}
+		break;
+	case ICP_QAT_UOF_LMEM_REGION:
+		if (qat_uclo_init_lmem_seg(handle, init_mem))
+			return -EINVAL;
+		break;
+	case ICP_QAT_UOF_UMEM_REGION:
+		if (qat_uclo_init_umem_seg(handle, init_mem))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("QAT: initmem region error. region type=0x%x\n",
+		       init_mem->region);
+		return -EINVAL;
+	}
+	return 0;
+}
+
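+/*
+ * Pre-fill the control store of each assigned AE with the image's fill
+ * pattern, both below and above the region the page will occupy.
+ */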
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+				struct icp_qat_uclo_encapme *image)
+{
+	unsigned int i;
+	struct icp_qat_uclo_encap_page *page;
+	struct icp_qat_uof_image *uof_image;
+	unsigned char ae;
+	unsigned int ustore_size;
+	unsigned int patt_pos;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	uint64_t *fill_data;
+
+	uof_image = image->img_ptr;
+	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+			    GFP_KERNEL);
+	if (!fill_data)
+		return -ENOMEM;
+	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+		memcpy(&fill_data[i], &uof_image->fill_pattern,
+		       sizeof(uint64_t));
+	page = image->page;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+			continue;
+		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+		patt_pos = page->beg_addr_p + page->micro_words_num;
+
+		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+				  page->beg_addr_p, &fill_data[0]);
+		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+				  ustore_size - patt_pos + 1,
+				  &fill_data[page->beg_addr_p]);
+	}
+	kfree(fill_data);
+	return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+	int i, ae;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+
+	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+		if (initmem->num_in_bytes) {
+			if (qat_uclo_init_ae_memory(handle, initmem))
+				return -EINVAL;
+		}
+		initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
+			(unsigned long)initmem +
+			sizeof(struct icp_qat_uof_initmem)) +
+			(sizeof(struct icp_qat_uof_memvar_attr) *
+			initmem->val_attr_num));
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (qat_hal_batch_wr_lm(handle, ae,
+					obj_handle->lm_init_tab[ae])) {
+			pr_err("QAT: failed to batch init lmem for AE %d\n", ae);
+			return -EINVAL;
+		}
+		qat_uclo_cleanup_batch_init_list(handle,
+						 &obj_handle->lm_init_tab[ae]);
+		qat_uclo_batch_wr_umem(handle, ae,
+				       obj_handle->umem_init_tab[ae]);
+		qat_uclo_cleanup_batch_init_list(handle,
+						 &obj_handle->
+						 umem_init_tab[ae]);
+	}
+	return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+				 char *chunk_id, void *cur)
+{
+	int i;
+	struct icp_qat_uof_chunkhdr *chunk_hdr =
+	    (struct icp_qat_uof_chunkhdr *)
+	    ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+	for (i = 0; i < obj_hdr->num_chunks; i++) {
+		if ((cur < (void *)&chunk_hdr[i]) &&
+		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
+			return &chunk_hdr[i];
+		}
+	}
+	return NULL;
+}
+
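+/* One CRC-16/CCITT-style step (polynomial 0x1021): fold byte @ch into @reg */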
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+	int i;
+	unsigned int topbit = 1 << 0xF;
+	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+	reg ^= inbyte << 0x8;
+	for (i = 0; i < 0x8; i++) {
+		if (reg & topbit)
+			reg = (reg << 1) ^ 0x1021;
+		else
+			reg <<= 1;
+	}
+	return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+	unsigned int chksum = 0;
+
+	if (ptr)
+		while (num--)
+			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+	return chksum;
+}
+
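+/*
+ * Scan the file chunk headers for @chunk_id, verify the chunk's checksum
+ * and wrap it in a freshly allocated icp_qat_uclo_objhdr; returns NULL on
+ * checksum mismatch or allocation failure.
+ */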
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+		   char *chunk_id)
+{
+	struct icp_qat_uof_filechunkhdr *file_chunk;
+	struct icp_qat_uclo_objhdr *obj_hdr;
+	char *chunk;
+	int i;
+
+	file_chunk = (struct icp_qat_uof_filechunkhdr *)
+		(buf + sizeof(struct icp_qat_uof_filehdr));
+	for (i = 0; i < file_hdr->num_chunks; i++) {
+		if (!strncmp(file_chunk->chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
+			chunk = buf + file_chunk->offset;
+			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+				chunk, file_chunk->size))
+				break;
+			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+			if (!obj_hdr)
+				break;
+			obj_hdr->file_buff = chunk;
+			obj_hdr->checksum = file_chunk->checksum;
+			obj_hdr->size = file_chunk->size;
+			return obj_hdr;
+		}
+		file_chunk++;
+	}
+	return NULL;
+}
+
+static unsigned int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+			    struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+	struct icp_qat_uof_objtable *neigh_reg_tab;
+	struct icp_qat_uof_code_page *code_page;
+
+	code_page = (struct icp_qat_uof_code_page *)
+			((char *)image + sizeof(struct icp_qat_uof_image));
+	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+		     code_page->uc_var_tab_offset);
+	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+		      code_page->imp_var_tab_offset);
+	imp_expr_tab = (struct icp_qat_uof_objtable *)
+		       (encap_uof_obj->beg_uof +
+		       code_page->imp_expr_tab_offset);
+	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+	    imp_expr_tab->entry_num) {
+		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+		return -EINVAL;
+	}
+	neigh_reg_tab = (struct icp_qat_uof_objtable *)
+			(encap_uof_obj->beg_uof +
+			code_page->neigh_reg_tab_offset);
+	if (neigh_reg_tab->entry_num) {
+		pr_err("QAT: UOF can't contain neighbor register table\n");
+		return -EINVAL;
+	}
+	if (image->numpages > 1) {
+		pr_err("QAT: UOF can't contain multiple pages\n");
+		return -EINVAL;
+	}
+	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+		pr_err("QAT: UOF can't use shared control store feature\n");
+		return -EFAULT;
+	}
+	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+		pr_err("QAT: UOF can't use reloadable feature\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+				     *encap_uof_obj,
+				     struct icp_qat_uof_image *img,
+				     struct icp_qat_uclo_encap_page *page)
+{
+	struct icp_qat_uof_code_page *code_page;
+	struct icp_qat_uof_code_area *code_area;
+	struct icp_qat_uof_objtable *uword_block_tab;
+	struct icp_qat_uof_uword_block *uwblock;
+	int i;
+
+	code_page = (struct icp_qat_uof_code_page *)
+			((char *)img + sizeof(struct icp_qat_uof_image));
+	page->def_page = code_page->def_page;
+	page->page_region = code_page->page_region;
+	page->beg_addr_v = code_page->beg_addr_v;
+	page->beg_addr_p = code_page->beg_addr_p;
+	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+						code_page->code_area_offset);
+	page->micro_words_num = code_area->micro_words_num;
+	uword_block_tab = (struct icp_qat_uof_objtable *)
+			  (encap_uof_obj->beg_uof +
+			  code_area->uword_block_tab);
+	page->uwblock_num = uword_block_tab->entry_num;
+	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+			sizeof(struct icp_qat_uof_objtable));
+	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+	for (i = 0; i < uword_block_tab->entry_num; i++)
+		page->uwblock[i].micro_words =
+		(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+			       struct icp_qat_uclo_encapme *ae_uimage,
+			       int max_image)
+{
+	int i, j;
+	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+	struct icp_qat_uof_image *image;
+	struct icp_qat_uof_objtable *ae_regtab;
+	struct icp_qat_uof_objtable *init_reg_sym_tab;
+	struct icp_qat_uof_objtable *sbreak_tab;
+	struct icp_qat_uof_encap_obj *encap_uof_obj =
+					&obj_handle->encap_uof_obj;
+
+	for (j = 0; j < max_image; j++) {
+		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+						ICP_QAT_UOF_IMAG, chunk_hdr);
+		if (!chunk_hdr)
+			break;
+		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+						     chunk_hdr->offset);
+		ae_regtab = (struct icp_qat_uof_objtable *)
+			   (image->reg_tab_offset +
+			   obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+			(((char *)ae_regtab) +
+			sizeof(struct icp_qat_uof_objtable));
+		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+				   (image->init_reg_sym_tab +
+				   obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+			(((char *)init_reg_sym_tab) +
+			sizeof(struct icp_qat_uof_objtable));
+		sbreak_tab = (struct icp_qat_uof_objtable *)
+			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+				      (((char *)sbreak_tab) +
+				      sizeof(struct icp_qat_uof_objtable));
+		ae_uimage[j].img_ptr = image;
+		if (qat_uclo_check_image_compat(encap_uof_obj, image))
+			goto out_err;
+		ae_uimage[j].page =
+			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+				GFP_KERNEL);
+		if (!ae_uimage[j].page)
+			goto out_err;
+		qat_uclo_map_image_page(encap_uof_obj, image,
+					ae_uimage[j].page);
+	}
+	return j;
+out_err:
+	for (i = 0; i < j; i++)
+		kfree(ae_uimage[i].page);
+	return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+	int i, ae;
+	int mflag = 0;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+	for (ae = 0; ae <= max_ae; ae++) {
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
+			continue;
+		for (i = 0; i < obj_handle->uimage_num; i++) {
+			if (!test_bit(ae, (unsigned long *)
+			&obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+				continue;
+			mflag = 1;
+			if (qat_uclo_init_ae_data(obj_handle, ae, i))
+				return -EINVAL;
+		}
+	}
+	if (!mflag) {
+		pr_err("QAT: uimage uses AE not set\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+		       char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+					obj_hdr->file_buff, tab_name, NULL);
+	if (chunk_hdr) {
+		int hdr_size;
+
+		memcpy(&str_table->table_len, obj_hdr->file_buff +
+		       chunk_hdr->offset, sizeof(str_table->table_len));
+		hdr_size = (char *)&str_table->strings - (char *)str_table;
+		str_table->strings = (unsigned long)obj_hdr->file_buff +
+					chunk_hdr->offset + hdr_size;
+		return str_table;
+	}
+	return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+					ICP_QAT_UOF_IMEM, NULL);
+	if (chunk_hdr) {
+		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+			chunk_hdr->offset, sizeof(unsigned int));
+		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+		(encap_uof_obj->beg_uof + chunk_hdr->offset +
+		sizeof(unsigned int));
+	}
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+	unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
+		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+		       obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
+		return -EINVAL;
+	}
+	maj_ver = obj_handle->prod_rev & 0xff;
+	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
+	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
+		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned char ctx_mask,
+			     enum icp_qat_uof_regtype reg_type,
+			     unsigned short reg_addr, unsigned int value)
+{
+	switch (reg_type) {
+	case ICP_GPA_ABS:
+	case ICP_GPB_ABS:
+		ctx_mask = 0;
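+		/* fall through */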
+	case ICP_GPA_REL:
+	case ICP_GPB_REL:
+		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+					reg_addr, value);
+	case ICP_SR_ABS:
+	case ICP_DR_ABS:
+	case ICP_SR_RD_ABS:
+	case ICP_DR_RD_ABS:
+		ctx_mask = 0;
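+		/* fall through */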
+	case ICP_SR_REL:
+	case ICP_DR_REL:
+	case ICP_SR_RD_REL:
+	case ICP_DR_RD_REL:
+		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+					    reg_addr, value);
+	case ICP_SR_WR_ABS:
+	case ICP_DR_WR_ABS:
+		ctx_mask = 0;
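+		/* fall through */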
+	case ICP_SR_WR_REL:
+	case ICP_DR_WR_REL:
+		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+					    reg_addr, value);
+	case ICP_NEIGH_REL:
+		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+	default:
+		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+				 unsigned int ae,
+				 struct icp_qat_uclo_encapme *encap_ae)
+{
+	unsigned int i;
+	unsigned char ctx_mask;
+	struct icp_qat_uof_init_regsym *init_regsym;
+
+	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+	    ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+
+	for (i = 0; i < encap_ae->init_regsym_num; i++) {
+		unsigned int exp_res;
+
+		init_regsym = &encap_ae->init_regsym[i];
+		exp_res = init_regsym->value;
+		switch (init_regsym->init_type) {
+		case ICP_QAT_UOF_INIT_REG:
+			qat_uclo_init_reg(handle, ae, ctx_mask,
+					  (enum icp_qat_uof_regtype)
+					  init_regsym->reg_type,
+					  (unsigned short)init_regsym->reg_addr,
+					  exp_res);
+			break;
+		case ICP_QAT_UOF_INIT_REG_CTX:
+			/* check if ctx is appropriate for the ctxMode */
+			if (!((1 << init_regsym->ctx) & ctx_mask)) {
+				pr_err("QAT: invalid ctx num = 0x%x\n",
+				       init_regsym->ctx);
+				return -EINVAL;
+			}
+			qat_uclo_init_reg(handle, ae,
+					  (unsigned char)
+					  (1 << init_regsym->ctx),
+					  (enum icp_qat_uof_regtype)
+					  init_regsym->reg_type,
+					  (unsigned short)init_regsym->reg_addr,
+					  exp_res);
+			break;
+		case ICP_QAT_UOF_INIT_EXPR:
+			pr_err("QAT: INIT_EXPR feature not supported\n");
+			return -EINVAL;
+		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+			return -EINVAL;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int s, ae;
+
+	if (obj_handle->global_inited)
+		return 0;
+	if (obj_handle->init_mem_tab.entry_num) {
+		if (qat_uclo_init_memory(handle)) {
+			pr_err("QAT: memory initialization failed\n");
+			return -EINVAL;
+		}
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+				continue;
+			if (qat_uclo_init_reg_sym(handle, ae,
+						  obj_handle->ae_data[ae].
+						  ae_slices[s].encap_image))
+				return -EINVAL;
+		}
+	}
+	obj_handle->global_inited = 1;
+	return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae, nn_mode, s;
+	struct icp_qat_uof_image *uof_image;
+	struct icp_qat_uclo_aedata *ae_data;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
+			continue;
+		ae_data = &obj_handle->ae_data[ae];
+		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+				      ICP_QAT_UCLO_MAX_CTX); s++) {
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+				continue;
+			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+			if (qat_hal_set_ae_ctx_mode(handle, ae,
+						    (char)ICP_QAT_CTX_MODE
+						    (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+				return -EFAULT;
+			}
+			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
+				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+				return -EFAULT;
+			}
+			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
+						   (char)ICP_QAT_LOC_MEM0_MODE
+						   (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+				return -EFAULT;
+			}
+			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
+						   (char)ICP_QAT_LOC_MEM1_MODE
+						   (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+				return -EFAULT;
+			}
+		}
+	}
+	return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	struct icp_qat_uclo_encapme *image;
+	int a;
+
+	for (a = 0; a < obj_handle->uimage_num; a++) {
+		image = &obj_handle->ae_uimage[a];
+		image->uwords_num = image->page->beg_addr_p +
+					image->page->micro_words_num;
+	}
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae;
+
+	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!obj_handle->uword_buf)
+		return -ENOMEM;
+	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+					     obj_handle->obj_hdr->file_buff;
+	obj_handle->uword_in_bytes = 6;
+	obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
+	obj_handle->prod_rev = PID_MAJOR_REV |
+			(PID_MINOR_REV & handle->hal_handle->revision_id);
+	if (qat_uclo_check_uof_compat(obj_handle)) {
+		pr_err("QAT: UOF incompatible\n");
+		return -EINVAL;
+	}
+	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+	if (!obj_handle->obj_hdr->file_buff ||
+	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+				    &obj_handle->str_table)) {
+		pr_err("QAT: UOF doesn't have effective images\n");
+		goto out_err;
+	}
+	obj_handle->uimage_num =
+		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+	if (!obj_handle->uimage_num)
+		goto out_err;
+	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+		pr_err("QAT: Bad object\n");
+		goto out_check_uof_aemask_err;
+	}
+	qat_uclo_init_uword_num(handle);
+	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+				   &obj_handle->init_mem_tab);
+	if (qat_uclo_set_ae_mode(handle))
+		goto out_check_uof_aemask_err;
+	return 0;
+out_check_uof_aemask_err:
+	for (ae = 0; ae < obj_handle->uimage_num; ae++)
+		kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+	kfree(obj_handle->uword_buf);
+	return -EFAULT;
+}
+
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+			 void *addr_ptr, int mem_size)
+{
+	struct icp_qat_uof_filehdr *filehdr;
+	struct icp_qat_uclo_objhandle *objhdl;
+
+	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+		     (sizeof(handle->hal_handle->ae_mask) * 8));
+
+	if (!handle || !addr_ptr || mem_size < 24)
+		return -EINVAL;
+	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+	if (!objhdl)
+		return -ENOMEM;
+	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+	if (!objhdl->obj_buf)
+		goto out_objbuf_err;
+	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+	if (qat_uclo_check_format(filehdr))
+		goto out_objhdr_err;
+	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+					     ICP_QAT_UOF_OBJS);
+	if (!objhdl->obj_hdr) {
+		pr_err("QAT: object file chunk is null\n");
+		goto out_objhdr_err;
+	}
+	handle->obj_handle = objhdl;
+	if (qat_uclo_parse_uof_obj(handle))
+		goto out_overlay_obj_err;
+	return 0;
+
+out_overlay_obj_err:
+	handle->obj_handle = NULL;
+	kfree(objhdl->obj_hdr);
+out_objhdr_err:
+	kfree(objhdl->obj_buf);
+out_objbuf_err:
+	kfree(objhdl);
+	return -ENOMEM;
+}
+
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int a;
+
+	if (!obj_handle)
+		return;
+
+	kfree(obj_handle->uword_buf);
+	for (a = 0; a < obj_handle->uimage_num; a++)
+		kfree(obj_handle->ae_uimage[a].page);
+
+	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+	kfree(obj_handle->obj_hdr);
+	kfree(obj_handle->obj_buf);
+	kfree(obj_handle);
+	handle->obj_handle = NULL;
+}
+
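+/*
+ * Resolve the microword at relative address @raddr from the page's uword
+ * blocks; a NULL @encap_page or a word matching INVLD_UWORD yields @fill.
+ */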
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+				 struct icp_qat_uclo_encap_page *encap_page,
+				 uint64_t *uword, unsigned int addr_p,
+				 unsigned int raddr, uint64_t fill)
+{
+	uint64_t uwrd = 0;
+	unsigned int i;
+
+	if (!encap_page) {
+		*uword = fill;
+		return;
+	}
+	for (i = 0; i < encap_page->uwblock_num; i++) {
+		if (raddr >= encap_page->uwblock[i].start_addr &&
+		    raddr <= encap_page->uwblock[i].start_addr +
+		    encap_page->uwblock[i].words_num - 1) {
+			raddr -= encap_page->uwblock[i].start_addr;
+			raddr *= obj_handle->uword_in_bytes;
+			memcpy(&uwrd, (void *)(((unsigned long)
+			       encap_page->uwblock[i].micro_words) + raddr),
+			       obj_handle->uword_in_bytes);
+			uwrd = uwrd & 0xbffffffffffull;
+		}
+	}
+	*uword = uwrd;
+	if (*uword == INVLD_UWORD)
+		*uword = fill;
+}
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+					struct icp_qat_uclo_encap_page
+					*encap_page, unsigned int ae)
+{
+	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	uint64_t fill_pat;
+
+	/* load the page starting at appropriate ustore address */
+	/* get fill-pattern from an image -- they are all the same */
+	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+	       sizeof(uint64_t));
+	uw_physical_addr = encap_page->beg_addr_p;
+	uw_relative_addr = 0;
+	words_num = encap_page->micro_words_num;
+	while (words_num) {
+		if (words_num < UWORD_CPYBUF_SIZE)
+			cpylen = words_num;
+		else
+			cpylen = UWORD_CPYBUF_SIZE;
+
+		/* load the buffer */
+		for (i = 0; i < cpylen; i++)
+			qat_uclo_fill_uwords(obj_handle, encap_page,
+					     &obj_handle->uword_buf[i],
+					     uw_physical_addr + i,
+					     uw_relative_addr + i, fill_pat);
+
+		/* copy the buffer to ustore */
+		qat_hal_wr_uwords(handle, (unsigned char)ae,
+				  uw_physical_addr, cpylen,
+				  obj_handle->uword_buf);
+
+		uw_physical_addr += cpylen;
+		uw_relative_addr += cpylen;
+		words_num -= cpylen;
+	}
+}
+
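+/*
+ * In eight-context mode every context is assigned (mask 0xff); in
+ * four-context mode only the even contexts run (mask 0x55).
+ */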
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+				    struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ctx_mask, s;
+	struct icp_qat_uclo_page *page;
+	unsigned char ae;
+	int ctx;
+
+	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+	/*
+	 * load the default page and set assigned CTX PC
+	 * to the entrypoint address
+	 */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
+			continue;
+		/* find the slice to which this image is assigned */
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (image->ctx_assigned & obj_handle->ae_data[ae].
+			    ae_slices[s].ctx_mask_assigned)
+				break;
+		}
+		if (s >= obj_handle->ae_data[ae].slice_num)
+			continue;
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		if (!page->encap_page->def_page)
+			continue;
+		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+					(ctx_mask & (1 << ctx)) ? page : NULL;
+		qat_hal_set_live_ctx(handle, (unsigned char)ae,
+				     image->ctx_assigned);
+		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+			       image->entry_address);
+	}
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int i;
+
+	if (qat_uclo_init_globals(handle))
+		return -EINVAL;
+	for (i = 0; i < obj_handle->uimage_num; i++) {
+		if (!obj_handle->ae_uimage[i].img_ptr)
+			return -EINVAL;
+		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+			return -EINVAL;
+		qat_uclo_wr_uimage_page(handle,
+					obj_handle->ae_uimage[i].img_ptr);
+	}
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
new file mode 100644
index 0000000..25171c5
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -0,0 +1,8 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o \
+		adf_isr.o \
+		adf_dh895xcc_hw_data.o \
+		adf_hw_arbiter.o \
+		qat_admin.o \
+		adf_admin.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
new file mode 100644
index 0000000..978d6c5
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -0,0 +1,144 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <adf_accel_devices.h>
+#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
+
+#define ADF_ADMINMSG_LEN 32
+
+struct adf_admin_comms {
+	dma_addr_t phy_addr;
+	void *virt_addr;
+	void __iomem *mailbox_addr;
+	struct mutex lock;	/* protects adf_admin_comms struct */
+};
+
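+/*
+ * Mailbox handshake with an accel engine: copy the request into the shared
+ * DMA region, raise the per-AE mailbox doorbell, then poll (up to 50 x 20 ms)
+ * for the firmware to clear it before copying the response back.
+ */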
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+			   uint32_t ae, void *in, void *out)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+	int offset = ae * ADF_ADMINMSG_LEN * 2;
+	void __iomem *mailbox = admin->mailbox_addr;
+	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+	int times, received;
+
+	mutex_lock(&admin->lock);
+
+	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+		mutex_unlock(&admin->lock);
+		return -EAGAIN;
+	}
+
+	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+	ADF_CSR_WR(mailbox, mb_offset, 1);
+	received = 0;
+	for (times = 0; times < 50; times++) {
+		msleep(20);
+		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+			received = 1;
+			break;
+		}
+	}
+	if (received)
+		memcpy(out, admin->virt_addr + offset +
+		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+	else
+		pr_err("QAT: Failed to send admin msg to accelerator\n");
+
+	mutex_unlock(&admin->lock);
+	return received ? 0 : -EFAULT;
+}
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin;
+	struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+	void __iomem *csr = pmisc->virt_addr;
+	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+	uint64_t reg_val;
+
+	admin = kzalloc_node(sizeof(*admin), GFP_KERNEL,
+			     accel_dev->numa_node);
+	if (!admin)
+		return -ENOMEM;
+	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+					       &admin->phy_addr, GFP_KERNEL);
+	if (!admin->virt_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+		kfree(admin);
+		return -ENOMEM;
+	}
+	reg_val = (uint64_t)admin->phy_addr;
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+	mutex_init(&admin->lock);
+	admin->mailbox_addr = mailbox;
+	accel_dev->admin = admin;
+	return 0;
+}
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+
+	if (!admin)
+		return;
+
+	if (admin->virt_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+
+	mutex_destroy(&admin->lock);
+	kfree(admin);
+	accel_dev->admin = NULL;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644
index 0000000..ef05825
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -0,0 +1,214 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+/* Worker thread to arbiter mapping tables, one per device SKU */
+static const uint32_t thrd_to_arb_map_sku4[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const uint32_t thrd_to_arb_map_sku6[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+	.name = ADF_DH895XCC_DEVICE_NAME,
+	.type = DEV_DH895XCC,
+	.instances = 0
+};
+
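+/* Fuse bits are active-low: invert and mask to get the enabled units */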
+static uint32_t get_accel_mask(uint32_t fuse)
+{
+	return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+			  ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static uint32_t get_ae_mask(uint32_t fuse)
+{
+	return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static uint32_t get_num_accels(struct adf_hw_device_data *self)
+{
+	uint32_t i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static uint32_t get_num_aes(struct adf_hw_device_data *self)
+{
+	uint32_t i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_PMISC_BAR;
+}
+
+static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+	switch (sku) {
+	case ADF_DH895XCC_FUSECTL_SKU_1:
+		return DEV_SKU_1;
+	case ADF_DH895XCC_FUSECTL_SKU_2:
+		return DEV_SKU_2;
+	case ADF_DH895XCC_FUSECTL_SKU_3:
+		return DEV_SKU_3;
+	case ADF_DH895XCC_FUSECTL_SKU_4:
+		return DEV_SKU_4;
+	default:
+		return DEV_SKU_UNKNOWN;
+	}
+	return DEV_SKU_UNKNOWN;
+}
+
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+			     uint32_t const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_1:
+		*arb_map_config = thrd_to_arb_map_sku4;
+		break;
+
+	case DEV_SKU_2:
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_sku6;
+		break;
+	default:
+		pr_err("QAT: The configuration doesn't match any SKU\n");
+		*arb_map_config = NULL;
+	}
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
+		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
+		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
+		val |= ADF_DH895XCC_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
+		val |= ADF_DH895XCC_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
+	}
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &dh895xcc_class;
+	hw_data->instance_id = dh895xcc_class.instances++;
+	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+	hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
+	hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_DH895XCC_FW;
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644
index 0000000..b707f29
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -0,0 +1,86 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_RX_RINGS_OFFSET 8
+#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12)
+#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3)
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
new file mode 100644
index 0000000..0d0435a
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -0,0 +1,449 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = adf_driver_name,
+	.probe = adf_probe,
+	.remove = adf_remove
+};
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	adf_exit_admin_comms(accel_dev);
+	adf_exit_arb(accel_dev);
+	adf_cleanup_etr_data(accel_dev);
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_dev->hw_device->pci_dev_id) {
+		case ADF_DH895XCC_PCI_DEVICE_ID:
+			adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev);
+	pci_release_regions(accel_pci_dev->pci_dev);
+	pci_disable_device(accel_pci_dev->pci_dev);
+	kfree(accel_dev);
+}
+
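+/*
+ * Rough NUMA heuristic: assume the 256 PCI bus numbers are split evenly
+ * across the physical packages and map the device's bus number to a node.
+ */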
+static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
+{
+	unsigned int bus_per_cpu = 0;
+	struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
+
+	if (!c->phys_proc_id)
+		return 0;
+
+	bus_per_cpu = 256 / (c->phys_proc_id + 1);
+
+	if (bus_per_cpu != 0)
+		return pdev->bus->number / bus_per_cpu;
+	return 0;
+}
+
+static int qat_dev_start(struct adf_accel_dev *accel_dev)
+{
+	int cpus = num_online_cpus();
+	int banks = GET_MAX_BANKS(accel_dev);
+	int instances = min(cpus, banks);
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	int i;
+	unsigned long val;
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		goto err;
+	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+		goto err;
+	for (i = 0; i < instances; i++) {
+		val = i;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+			 i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		val = 128;
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 512;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 0;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 2;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 4;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 8;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 10;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 12;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = ADF_COALESCING_DEF_TIME;
+		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+						key, (void *)&val, ADF_DEC))
+			goto err;
+	}
+
+	val = i;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	return adf_dev_start(accel_dev);
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+	return -EINVAL;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	void __iomem *pmisc_bar_addr = NULL;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	uint8_t node;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	node = adf_get_dev_node_id(pdev);
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->numa_node = node;
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel() is called.
+	 */
+	if (adf_devmgr_add_dev(accel_dev)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	switch (ent->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+		break;
+	default:
+		ret = -ENODEV;
+		goto out_err;
+	}
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	accel_pci_dev->pci_dev = pdev;
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
+		 hw_data->dev_class->name, hw_data->instance_id);
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set DMA mask */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, adf_driver_name)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
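+		/* each 64-bit BAR spans two BAR registers, hence the stride */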
+		bar_nr = i * 2;
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+			ret = -EFAULT;
+			goto out_err;
+		}
+		if (i == ADF_DH895XCC_PMISC_BAR)
+			pmisc_bar_addr = bar->virt_addr;
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	if (adf_init_etr_data(accel_dev)) {
+		dev_err(&pdev->dev, "Failed to initialize etr\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	if (adf_init_admin_comms(accel_dev)) {
+		dev_err(&pdev->dev, "Failed to initialize admin comms\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	if (adf_init_arb(accel_dev)) {
+		dev_err(&pdev->dev, "Failed to initialize hw arbiter\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+		   ADF_DH895XCC_SMIA0_MASK);
+	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+		   ADF_DH895XCC_SMIA1_MASK);
+
+	ret = qat_dev_start(accel_dev);
+	if (ret) {
+		adf_dev_stop(accel_dev);
+		goto out_err;
+	}
+
+	return 0;
+out_err:
+	adf_cleanup_accel(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	if (adf_dev_stop(accel_dev))
+		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+	if (qat_admin_register())
+		return -EFAULT;
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	qat_admin_unregister();
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE("qat_895xcc.bin");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
new file mode 100644
index 0000000..a2fbb6c
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
@@ -0,0 +1,67 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_DRV_H_
+#define ADF_DH895x_DRV_H_
+#include <adf_accel_devices.h>
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+			     uint32_t const **arb_map_config);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+			   uint32_t ae, void *in, void *out);
+int qat_admin_register(void);
+int qat_admin_unregister(void);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
new file mode 100644
index 0000000..1864bdb
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
@@ -0,0 +1,159 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_WRK_2_SER_MAP 10
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+	(ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
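+/*
+ * Example: WRITE_CSR_ARB_SARCONFIG(csr, 1, v) writes v at
+ * ADF_ARB_OFFSET + ADF_ARB_REG_SIZE * 1 = 0x30000 + 0x4 = 0x30004.
+ */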
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+	uint32_t arb_cfg = 0x1u << 31 | 0x4 << 4 | 0x1;
+	uint32_t arb, i;
+	const uint32_t *thd_2_arb_cfg;
+
+	/* Configure the service arbiters for 32-byte responses and
+	 * enable the ring flow control check. */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+	/* Setup service weighting */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+			WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+	/* Setup ring response ordering */
+	for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+		WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+	/* Setup worker queue registers */
+	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+	/* Map worker threads to service arbiters */
+	adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+
+	if (!thd_2_arb_cfg)
+		return -EFAULT;
+
+	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, thd_2_arb_cfg[i]);
+
+	return 0;
+}
+
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+{
+	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+				   ring->bank->bank_number,
+				   ring->bank->ring_mask & 0xFF);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *csr;
+	unsigned int i;
+
+	if (!accel_dev->transport)
+		return;
+
+	csr = accel_dev->transport->banks[0].csr_addr;
+
+	/* Reset arbiter configuration */
+	for (i = 0; i < ADF_ARB_NUM; i++)
+		WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+	/* Shutdown work queue */
+	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+	/* Unmap worker threads from service arbiters */
+	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+	/* Disable arbitration on all rings */
+	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
new file mode 100644
index 0000000..d4172de
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -0,0 +1,266 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t msix_num_entries = hw_data->num_banks + 1;
+	int i;
+
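+	/* one vector per ring bank plus one for the AE cluster */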
+	for (i = 0; i < msix_num_entries; i++)
+		pci_dev_info->msix_entries.entries[i].entry = i;
+
+	if (pci_enable_msix(pci_dev_info->pci_dev,
+			    pci_dev_info->msix_entries.entries,
+			    msix_num_entries)) {
+		pr_err("QAT: Failed to enable MSIX IRQ\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+	pci_disable_msix(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+	struct adf_etr_bank_data *bank = bank_ptr;
+
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
+	tasklet_hi_schedule(&bank->resp_hanlder);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+	struct adf_accel_dev *accel_dev = dev_ptr;
+
+	pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id);
+	return IRQ_HANDLED;
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int ret, i;
+	char *name;
+
+	/* Request msix irq for all banks */
+	for (i = 0; i < hw_data->num_banks; i++) {
+		struct adf_etr_bank_data *bank = &etr_data->banks[i];
+		unsigned int cpu, cpus = num_online_cpus();
+
+		name = pci_dev_info->msix_entries.names[i];
+		snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+			 "qat%d-bundle%d", accel_dev->accel_id, i);
+		ret = request_irq(msixe[i].vector,
+				  adf_msix_isr_bundle, 0, name, bank);
+		if (ret) {
+			pr_err("QAT: failed to enable irq %d for %s\n",
+			       msixe[i].vector, name);
+			return ret;
+		}
+
+		cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
+		irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
+	}
+
+	/* Request msix irq for AE */
+	name = pci_dev_info->msix_entries.names[i];
+	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+		 "qat%d-ae-cluster", accel_dev->accel_id);
+	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+	if (ret) {
+		pr_err("QAT: failed to enable irq %d for %s\n",
+		       msixe[i].vector, name);
+		return ret;
+	}
+	return ret;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++) {
+		irq_set_affinity_hint(msixe[i].vector, NULL);
+		free_irq(msixe[i].vector, &etr_data->banks[i]);
+	}
+	irq_set_affinity_hint(msixe[i].vector, NULL);
+	free_irq(msixe[i].vector, accel_dev);
+}
+
+static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+	int i;
+	char **names;
+	struct msix_entry *entries;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t msix_num_entries = hw_data->num_banks + 1;
+
+	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
+			       GFP_KERNEL, accel_dev->numa_node);
+	if (!entries)
+		return -ENOMEM;
+
+	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
+	if (!names) {
+		kfree(entries);
+		return -ENOMEM;
+	}
+	for (i = 0; i < msix_num_entries; i++) {
+		names[i] = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+		if (!names[i])
+			goto err;
+	}
+	accel_dev->accel_pci_dev.msix_entries.entries = entries;
+	accel_dev->accel_pci_dev.msix_entries.names = names;
+	return 0;
+err:
+	for (i = 0; i < msix_num_entries; i++)
+		kfree(names[i]);
+	kfree(entries);
+	kfree(names);
+	return -ENOMEM;
+}
+
+static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t msix_num_entries = hw_data->num_banks + 1;
+	char **names = accel_dev->accel_pci_dev.msix_entries.names;
+	int i;
+
+	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
+	for (i = 0; i < msix_num_entries; i++)
+		kfree(names[i]);
+	kfree(names);
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++)
+		tasklet_init(&priv_data->banks[i].resp_hanlder,
+			     adf_response_handler,
+			     (unsigned long)&priv_data->banks[i]);
+	return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++) {
+		tasklet_disable(&priv_data->banks[i].resp_hanlder);
+		tasklet_kill(&priv_data->banks[i].resp_hanlder);
+	}
+}
+
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+	adf_free_irqs(accel_dev);
+	adf_cleanup_bh(accel_dev);
+	adf_disable_msix(&accel_dev->accel_pci_dev);
+	adf_isr_free_msix_entry_table(accel_dev);
+}
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+	int ret;
+
+	ret = adf_isr_alloc_msix_entry_table(accel_dev);
+	if (ret)
+		return ret;
+	if (adf_enable_msix(accel_dev))
+		goto err_out;
+
+	if (adf_setup_bh(accel_dev))
+		goto err_out;
+
+	if (adf_request_irqs(accel_dev))
+		goto err_out;
+
+	return 0;
+err_out:
+	adf_isr_resource_free(accel_dev);
+	return -EFAULT;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
new file mode 100644
index 0000000..55b7a8e
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
@@ -0,0 +1,107 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <icp_qat_fw_init_admin.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include "adf_drv.h"
+
+static struct service_hndl qat_admin;
+
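+/* Send an admin command to each AE and fail if any response reports an error */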
+static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct icp_qat_fw_init_admin_req req;
+	struct icp_qat_fw_init_admin_resp resp;
+	int i;
+
+	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+	req.init_admin_cmd_id = cmd;
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+		    resp.init_resp_hdr.status)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int qat_admin_start(struct adf_accel_dev *accel_dev)
+{
+	return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+}
+
+static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
+				   enum adf_event event)
+{
+	int ret;
+
+	switch (event) {
+	case ADF_EVENT_START:
+		ret = qat_admin_start(accel_dev);
+		break;
+	case ADF_EVENT_STOP:
+	case ADF_EVENT_INIT:
+	case ADF_EVENT_SHUTDOWN:
+	default:
+		ret = 0;
+	}
+	return ret;
+}
+
+int qat_admin_register(void)
+{
+	memset(&qat_admin, 0, sizeof(struct service_hndl));
+	qat_admin.event_hld = qat_admin_event_handler;
+	qat_admin.name = "qat_admin";
+	qat_admin.admin = 1;
+	return adf_service_register(&qat_admin);
+}
+
+int qat_admin_unregister(void)
+{
+	return adf_service_unregister(&qat_admin);
+}
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
new file mode 100644
index 0000000..348dc31
--- /dev/null
+++ b/drivers/crypto/qce/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
+qcrypto-objs := core.o \
+		common.o \
+		dma.o \
+		sha.o \
+		ablkcipher.o
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644
index 0000000..ad592de
--- /dev/null
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(ablkcipher_algs);
+
+static void qce_ablkcipher_done(void *data)
+{
+	struct crypto_async_request *async_req = data;
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	enum dma_data_direction dir_src, dir_dst;
+	u32 status;
+	int error;
+	bool diff_dst;
+
+	diff_dst = (req->src != req->dst);
+	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+	error = qce_dma_terminate_all(&qce->dma);
+	if (error)
+		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+			error);
+
+	if (diff_dst)
+		qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
+			    rctx->src_chained);
+	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+		    rctx->dst_chained);
+
+	sg_free_table(&rctx->dst_tbl);
+
+	error = qce_check_status(qce, &status);
+	if (error < 0)
+		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+
+	qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	enum dma_data_direction dir_src, dir_dst;
+	struct scatterlist *sg;
+	bool diff_dst;
+	gfp_t gfp;
+	int ret;
+
+	rctx->iv = req->info;
+	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	rctx->cryptlen = req->nbytes;
+
+	diff_dst = (req->src != req->dst);
+	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+	rctx->src_nents = qce_countsg(req->src, req->nbytes,
+				      &rctx->src_chained);
+	if (diff_dst) {
+		rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
+					      &rctx->dst_chained);
+	} else {
+		rctx->dst_nents = rctx->src_nents;
+		rctx->dst_chained = rctx->src_chained;
+	}
+
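+	/* one extra entry for the result buffer scatterlist added below */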
+	rctx->dst_nents += 1;
+
+	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+						GFP_KERNEL : GFP_ATOMIC;
+
+	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+	if (ret)
+		return ret;
+
+	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto error_free;
+	}
+
+	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto error_free;
+	}
+
+	sg_mark_end(sg);
+	rctx->dst_sg = rctx->dst_tbl.sgl;
+
+	ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+			rctx->dst_chained);
+	if (ret < 0)
+		goto error_free;
+
+	if (diff_dst) {
+		ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+				rctx->src_chained);
+		if (ret < 0)
+			goto error_unmap_dst;
+		rctx->src_sg = req->src;
+	} else {
+		rctx->src_sg = rctx->dst_sg;
+	}
+
+	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+			       rctx->dst_sg, rctx->dst_nents,
+			       qce_ablkcipher_done, async_req);
+	if (ret)
+		goto error_unmap_src;
+
+	qce_dma_issue_pending(&qce->dma);
+
+	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+	if (ret)
+		goto error_terminate;
+
+	return 0;
+
+error_terminate:
+	qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+	if (diff_dst)
+		qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+			    rctx->src_chained);
+error_unmap_dst:
+	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+		    rctx->dst_chained);
+error_free:
+	sg_free_table(&rctx->dst_tbl);
+	return ret;
+}
+
+static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+				 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
+	int ret;
+
+	if (!key || !keylen)
+		return -EINVAL;
+
+	if (IS_AES(flags)) {
+		switch (keylen) {
+		case AES_KEYSIZE_128:
+		case AES_KEYSIZE_256:
+			break;
+		default:
+			goto fallback;
+		}
+	} else if (IS_DES(flags)) {
+		u32 tmp[DES_EXPKEY_WORDS];
+
+		ret = des_ekey(tmp, key);
+		if (!ret && crypto_ablkcipher_get_flags(ablk) &
+		    CRYPTO_TFM_REQ_WEAK_KEY)
+			goto weakkey;
+	}
+
+	ctx->enc_keylen = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	return 0;
+fallback:
+	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+	if (!ret)
+		ctx->enc_keylen = keylen;
+	return ret;
+weakkey:
+	crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
+	return -EINVAL;
+}
+
+static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+{
+	struct crypto_tfm *tfm =
+			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+	int ret;
+
+	rctx->flags = tmpl->alg_flags;
+	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
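+	/* AES key sizes the engine cannot handle (e.g. 192-bit) are routed
+	 * to the software fallback tfm set up in qce_ablkcipher_init() */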
+	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+	    ctx->enc_keylen != AES_KEYSIZE_256) {
+		ablkcipher_request_set_tfm(req, ctx->fallback);
+		ret = encrypt ? crypto_ablkcipher_encrypt(req) :
+				crypto_ablkcipher_decrypt(req);
+		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+		return ret;
+	}
+
+	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	return qce_ablkcipher_crypt(req, 1);
+}
+
+static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	return qce_ablkcipher_crypt(req, 0);
+}
+
+static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+
+	ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
+						CRYPTO_ALG_TYPE_ABLKCIPHER,
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->fallback))
+		return PTR_ERR(ctx->fallback);
+
+	return 0;
+}
+
+static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_ablkcipher(ctx->fallback);
+}
+
+struct qce_ablkcipher_def {
+	unsigned long flags;
+	const char *name;
+	const char *drv_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	unsigned int min_keysize;
+	unsigned int max_keysize;
+};
+
+static const struct qce_ablkcipher_def ablkcipher_def[] = {
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
+		.name		= "ecb(aes)",
+		.drv_name	= "ecb-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
+		.name		= "cbc(aes)",
+		.drv_name	= "cbc-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
+		.name		= "ctr(aes)",
+		.drv_name	= "ctr-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
+		.name		= "xts(aes)",
+		.drv_name	= "xts-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
+		.name		= "ecb(des)",
+		.drv_name	= "ecb-des-qce",
+		.blocksize	= DES_BLOCK_SIZE,
+		.ivsize		= 0,
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
+		.name		= "cbc(des)",
+		.drv_name	= "cbc-des-qce",
+		.blocksize	= DES_BLOCK_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
+		.name		= "ecb(des3_ede)",
+		.drv_name	= "ecb-3des-qce",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= 0,
+		.min_keysize	= DES3_EDE_KEY_SIZE,
+		.max_keysize	= DES3_EDE_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
+		.name		= "cbc(des3_ede)",
+		.drv_name	= "cbc-3des-qce",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.min_keysize	= DES3_EDE_KEY_SIZE,
+		.max_keysize	= DES3_EDE_KEY_SIZE,
+	},
+};
+
+static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+				       struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl;
+	struct crypto_alg *alg;
+	int ret;
+
+	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+	if (!tmpl)
+		return -ENOMEM;
+
+	alg = &tmpl->alg.crypto;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+
+	alg->cra_blocksize = def->blocksize;
+	alg->cra_ablkcipher.ivsize = def->ivsize;
+	alg->cra_ablkcipher.min_keysize = def->min_keysize;
+	alg->cra_ablkcipher.max_keysize = def->max_keysize;
+	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
+	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
+
+	alg->cra_priority = 300;
+	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+			 CRYPTO_ALG_NEED_FALLBACK;
+	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
+	alg->cra_alignmask = 0;
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = qce_ablkcipher_init;
+	alg->cra_exit = qce_ablkcipher_exit;
+	INIT_LIST_HEAD(&alg->cra_list);
+
+	INIT_LIST_HEAD(&tmpl->entry);
+	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+	tmpl->alg_flags = def->flags;
+	tmpl->qce = qce;
+
+	ret = crypto_register_alg(alg);
+	if (ret) {
+		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+		kfree(tmpl);
+		return ret;
+	}
+
+	list_add_tail(&tmpl->entry, &ablkcipher_algs);
+	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+	return 0;
+}
+
+static void qce_ablkcipher_unregister(struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl, *n;
+
+	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
+		crypto_unregister_alg(&tmpl->alg.crypto);
+		list_del(&tmpl->entry);
+		kfree(tmpl);
+	}
+}
+
+static int qce_ablkcipher_register(struct qce_device *qce)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
+		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+err:
+	qce_ablkcipher_unregister(qce);
+	return ret;
+}
+
+const struct qce_algo_ops ablkcipher_ops = {
+	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.register_algs = qce_ablkcipher_register,
+	.unregister_algs = qce_ablkcipher_unregister,
+	.async_req_handle = qce_ablkcipher_async_req_handle,
+};
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
new file mode 100644
index 0000000..d5757cf
--- /dev/null
+++ b/drivers/crypto/qce/cipher.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CIPHER_H_
+#define _CIPHER_H_
+
+#include "common.h"
+#include "core.h"
+
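+/* big enough to hold an XTS double key (two AES-256 halves) */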
+#define QCE_MAX_KEY_SIZE	64
+
+struct qce_cipher_ctx {
+	u8 enc_key[QCE_MAX_KEY_SIZE];
+	unsigned int enc_keylen;
+	struct crypto_ablkcipher *fallback;
+};
+
+/**
+ * struct qce_cipher_reqctx - holds private cipher objects per request
+ * @flags: operation flags
+ * @iv: pointer to the IV
+ * @ivsize: IV size
+ * @src_nents: source entries
+ * @dst_nents: destination entries
+ * @src_chained: is source chained
+ * @dst_chained: is destination chained
+ * @result_sg: scatterlist used for result buffer
+ * @dst_tbl: destination sg table
+ * @dst_sg: destination sg pointer table beginning
+ * @src_tbl: source sg table
+ * @src_sg: source sg pointer table beginning
+ * @cryptlen: crypto length
+ */
+struct qce_cipher_reqctx {
+	unsigned long flags;
+	u8 *iv;
+	unsigned int ivsize;
+	int src_nents;
+	int dst_nents;
+	bool src_chained;
+	bool dst_chained;
+	struct scatterlist result_sg;
+	struct sg_table dst_tbl;
+	struct scatterlist *dst_sg;
+	struct sg_table src_tbl;
+	struct scatterlist *src_sg;
+	unsigned int cryptlen;
+};
+
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+
+	return container_of(alg, struct qce_alg_template, alg.crypto);
+}
+
+extern const struct qce_algo_ops ablkcipher_ops;
+
+#endif /* _CIPHER_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
new file mode 100644
index 0000000..1fb5fde
--- /dev/null
+++ b/drivers/crypto/qce/common.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "cipher.h"
+#include "common.h"
+#include "core.h"
+#include "regs-v5.h"
+#include "sha.h"
+
+#define QCE_SECTOR_SIZE		512
+
+static inline u32 qce_read(struct qce_device *qce, u32 offset)
+{
+	return readl(qce->base + offset);
+}
+
+static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
+{
+	writel(val, qce->base + offset);
+}
+
+static inline void qce_write_array(struct qce_device *qce, u32 offset,
+				   const u32 *val, unsigned int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		qce_write(qce, offset + i * sizeof(u32), val[i]);
+}
+
+static inline void
+qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		qce_write(qce, offset + i * sizeof(u32), 0);
+}
+
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+	u32 cfg = 0;
+
+	if (IS_AES(flags)) {
+		if (aes_key_size == AES_KEYSIZE_128)
+			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+		else if (aes_key_size == AES_KEYSIZE_256)
+			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+	} else if (IS_DES(flags)) {
+		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+	} else if (IS_3DES(flags)) {
+		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+	}
+
+	switch (flags & QCE_MODE_MASK) {
+	case QCE_MODE_ECB:
+		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CBC:
+		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CTR:
+		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_XTS:
+		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CCM:
+		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+		break;
+	default:
+		return ~0;
+	}
+
+	return cfg;
+}
+
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+{
+	u32 cfg = 0;
+
+	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
+	else
+		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
+
+	if (IS_CCM(flags) || IS_CMAC(flags)) {
+		if (key_size == AES_KEYSIZE_128)
+			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
+		else if (key_size == AES_KEYSIZE_256)
+			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
+	}
+
+	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
+		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
+	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
+		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
+	else if (IS_CMAC(flags))
+		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+
+	if (IS_SHA1(flags) || IS_SHA256(flags))
+		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
+	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
+		 IS_CBC(flags) || IS_CTR(flags))
+		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
+	else if (IS_AES(flags) && IS_CCM(flags))
+		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
+	else if (IS_AES(flags) && IS_CMAC(flags))
+		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
+
+	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
+		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+
+	if (IS_CCM(flags))
+		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
+
+	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
+	    IS_CMAC(flags))
+		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
+
+	return cfg;
+}
+
+static u32 qce_config_reg(struct qce_device *qce, int little)
+{
+	u32 beats = (qce->burst_size >> 3) - 1;
+	u32 pipe_pair = qce->pipe_pair_id;
+	u32 config;
+
+	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+	config &= ~BIT(HIGH_SPD_EN_N_SHIFT);
+
+	if (little)
+		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
+
+	return config;
+}
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+	__be32 *d = dst;
+	const u8 *s = src;
+	unsigned int n;
+
+	n = len / sizeof(u32);
+	for (; n > 0; n--) {
+		*d = cpu_to_be32p((const __u32 *) s);
+		s += sizeof(__u32);
+		d++;
+	}
+}
+
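+/*
+ * Byte-reverse the IV into the tail of a zeroed 16-byte buffer, then
+ * run it through the usual cpu-to-be32 conversion.
+ */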
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+	u8 swap[QCE_AES_IV_LENGTH];
+	u32 i, j;
+
+	if (ivsize > QCE_AES_IV_LENGTH)
+		return;
+
+	memset(swap, 0, QCE_AES_IV_LENGTH);
+
+	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+	     i < QCE_AES_IV_LENGTH; i++, j--)
+		swap[i] = src[j];
+
+	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+		       unsigned int enckeylen, unsigned int cryptlen)
+{
+	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+	unsigned int xtsdusize;
+
+	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+			       enckeylen / 2);
+	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+	/* xts du size 512B */
+	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
+
+static void qce_setup_config(struct qce_device *qce)
+{
+	u32 config;
+
+	/* get big endianness */
+	config = qce_config_reg(qce, 0);
+
+	/* clear status */
+	qce_write(qce, REG_STATUS, 0);
+	qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+}
+
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
+				u32 totallen, u32 offset)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
+	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+	u32 auth_cfg = 0, config;
+	unsigned int iv_words;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (!rctx->last_blk && req->nbytes % blocksize)
+		return -EINVAL;
+
+	qce_setup_config(qce);
+
+	if (IS_CMAC(rctx->flags)) {
+		qce_write(qce, REG_AUTH_SEG_CFG, 0);
+		qce_write(qce, REG_ENCR_SEG_CFG, 0);
+		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
+		qce_clear_array(qce, REG_AUTH_IV0, 16);
+		qce_clear_array(qce, REG_AUTH_KEY0, 16);
+		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+	}
+
+	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
+		u32 authkey_words = rctx->authklen / sizeof(u32);
+
+		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
+		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
+				authkey_words);
+	}
+
+	if (IS_CMAC(rctx->flags))
+		goto go_proc;
+
+	if (rctx->first_blk)
+		memcpy(auth, rctx->digest, digestsize);
+	else
+		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
+
+	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
+	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);
+
+	if (rctx->first_blk)
+		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+	else
+		qce_write_array(qce, REG_AUTH_BYTECNT0,
+				(u32 *)rctx->byte_count, 2);
+
+	auth_cfg = qce_auth_cfg(rctx->flags, 0);
+
+	if (rctx->last_blk)
+		auth_cfg |= BIT(AUTH_LAST_SHIFT);
+	else
+		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
+
+	if (rctx->first_blk)
+		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+	else
+		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
+
+go_proc:
+	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
+	qce_write(qce, REG_AUTH_SEG_START, 0);
+	qce_write(qce, REG_ENCR_SEG_CFG, 0);
+	qce_write(qce, REG_SEG_SIZE, req->nbytes);
+
+	/* get little endianness */
+	config = qce_config_reg(qce, 1);
+	qce_write(qce, REG_CONFIG, config);
+
+	qce_crypto_go(qce);
+
+	return 0;
+}
+
+static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+				     u32 totallen, u32 offset)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+	unsigned int enckey_words, enciv_words;
+	unsigned int keylen;
+	u32 encr_cfg = 0, auth_cfg = 0, config;
+	unsigned int ivsize = rctx->ivsize;
+	unsigned long flags = rctx->flags;
+
+	qce_setup_config(qce);
+
+	if (IS_XTS(flags))
+		keylen = ctx->enc_keylen / 2;
+	else
+		keylen = ctx->enc_keylen;
+
+	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
+	enckey_words = keylen / sizeof(u32);
+
+	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+	encr_cfg = qce_encr_cfg(flags, keylen);
+
+	if (IS_DES(flags)) {
+		enciv_words = 2;
+		enckey_words = 2;
+	} else if (IS_3DES(flags)) {
+		enciv_words = 2;
+		enckey_words = 6;
+	} else if (IS_AES(flags)) {
+		if (IS_XTS(flags))
+			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
+				   rctx->cryptlen);
+		enciv_words = 4;
+	} else {
+		return -EINVAL;
+	}
+
+	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+	if (!IS_ECB(flags)) {
+		if (IS_XTS(flags))
+			qce_xts_swapiv(enciv, rctx->iv, ivsize);
+		else
+			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
+
+		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+	}
+
+	if (IS_ENCRYPT(flags))
+		encr_cfg |= BIT(ENCODE_SHIFT);
+
+	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+
+	if (IS_CTR(flags)) {
+		qce_write(qce, REG_CNTR_MASK, ~0);
+		qce_write(qce, REG_CNTR_MASK0, ~0);
+		qce_write(qce, REG_CNTR_MASK1, ~0);
+		qce_write(qce, REG_CNTR_MASK2, ~0);
+	}
+
+	qce_write(qce, REG_SEG_SIZE, totallen);
+
+	/* get little endianness */
+	config = qce_config_reg(qce, 1);
+	qce_write(qce, REG_CONFIG, config);
+
+	qce_crypto_go(qce);
+
+	return 0;
+}
+
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+	      u32 offset)
+{
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+	case CRYPTO_ALG_TYPE_AHASH:
+		return qce_setup_regs_ahash(async_req, totallen, offset);
+	default:
+		return -EINVAL;
+	}
+}
+
+#define STATUS_ERRORS	\
+		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+
+int qce_check_status(struct qce_device *qce, u32 *status)
+{
+	int ret = 0;
+
+	*status = qce_read(qce, REG_STATUS);
+
+	/*
+	 * Don't use the result dump status; the operation may not be
+	 * complete. Use the status register we just read instead. If
+	 * result_status from the result dump is ever needed, it must be
+	 * byte-swapped first, since the device is set to little endian.
+	 */
+	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
+		ret = -ENXIO;
+
+	return ret;
+}
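+
+/*
+ * Illustrative status words (values invented for this example): a clean
+ * completion reads back BIT(OPERATION_DONE_SHIFT) with no error bits set,
+ * i.e. 0x00000002, and qce_check_status() returns 0.  Any of the
+ * SW_ERR/AXI_ERR/HSD_ERR bits, or a cleared OPERATION_DONE bit, makes it
+ * return -ENXIO while still handing the raw status back to the caller.
+ */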
+
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
+{
+	u32 val;
+
+	val = qce_read(qce, REG_VERSION);
+	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
+	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
+	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
+}
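
For reference, here is how a raw version word decodes with the REG_VERSION masks defined in regs-v5.h further below; the 0x05010200 value is invented for illustration:

	u32 val = 0x05010200;
	u32 major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT; /* 0x05 */
	u32 minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT; /* 0x01 */
	u32 step  = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;   /* 0x0200 */

Such a device reports as version 5.1.512 and passes the qce_check_version() test in core.c (major is 5, minor is non-zero).
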
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
new file mode 100644
index 0000000..a4addd4
--- /dev/null
+++ b/drivers/crypto/qce/common.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <linux/crypto.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+/* key size in bytes */
+#define QCE_SHA_HMAC_KEY_SIZE		64
+#define QCE_MAX_CIPHER_KEY_SIZE		AES_KEYSIZE_256
+
+/* IV length in bytes */
+#define QCE_AES_IV_LENGTH		AES_BLOCK_SIZE
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCE_MAX_IV_SIZE			AES_BLOCK_SIZE
+
+/* maximum nonce bytes */
+#define QCE_MAX_NONCE			16
+#define QCE_MAX_NONCE_WORDS		(QCE_MAX_NONCE / sizeof(u32))
+
+/* burst size alignment requirement */
+#define QCE_MAX_ALIGN_SIZE		64
+
+/* cipher algorithms */
+#define QCE_ALG_DES			BIT(0)
+#define QCE_ALG_3DES			BIT(1)
+#define QCE_ALG_AES			BIT(2)
+
+/* hash and hmac algorithms */
+#define QCE_HASH_SHA1			BIT(3)
+#define QCE_HASH_SHA256			BIT(4)
+#define QCE_HASH_SHA1_HMAC		BIT(5)
+#define QCE_HASH_SHA256_HMAC		BIT(6)
+#define QCE_HASH_AES_CMAC		BIT(7)
+
+/* cipher modes */
+#define QCE_MODE_CBC			BIT(8)
+#define QCE_MODE_ECB			BIT(9)
+#define QCE_MODE_CTR			BIT(10)
+#define QCE_MODE_XTS			BIT(11)
+#define QCE_MODE_CCM			BIT(12)
+#define QCE_MODE_MASK			GENMASK(12, 8)
+
+/* cipher encryption/decryption operations */
+#define QCE_ENCRYPT			BIT(13)
+#define QCE_DECRYPT			BIT(14)
+
+#define IS_DES(flags)			(flags & QCE_ALG_DES)
+#define IS_3DES(flags)			(flags & QCE_ALG_3DES)
+#define IS_AES(flags)			(flags & QCE_ALG_AES)
+
+#define IS_SHA1(flags)			(flags & QCE_HASH_SHA1)
+#define IS_SHA256(flags)		(flags & QCE_HASH_SHA256)
+#define IS_SHA1_HMAC(flags)		(flags & QCE_HASH_SHA1_HMAC)
+#define IS_SHA256_HMAC(flags)		(flags & QCE_HASH_SHA256_HMAC)
+#define IS_CMAC(flags)			(flags & QCE_HASH_AES_CMAC)
+#define IS_SHA(flags)			(IS_SHA1(flags) || IS_SHA256(flags))
+#define IS_SHA_HMAC(flags)		\
+		(IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
+
+#define IS_CBC(mode)			(mode & QCE_MODE_CBC)
+#define IS_ECB(mode)			(mode & QCE_MODE_ECB)
+#define IS_CTR(mode)			(mode & QCE_MODE_CTR)
+#define IS_XTS(mode)			(mode & QCE_MODE_XTS)
+#define IS_CCM(mode)			(mode & QCE_MODE_CCM)
+
+#define IS_ENCRYPT(dir)			(dir & QCE_ENCRYPT)
+#define IS_DECRYPT(dir)			(dir & QCE_DECRYPT)
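+
+/*
+ * Illustrative example (not taken from a real request): a flags word
+ * combines one algorithm bit, one mode bit and one direction bit, e.g.
+ *
+ *	unsigned long flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_ENCRYPT;
+ *
+ * for which IS_AES(flags), IS_CBC(flags) and IS_ENCRYPT(flags) are all
+ * non-zero and every other IS_* test evaluates to zero.
+ */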
+
+struct qce_alg_template {
+	struct list_head entry;
+	u32 crypto_alg_type;
+	unsigned long alg_flags;
+	const u32 *std_iv;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg ahash;
+	} alg;
+	struct qce_device *qce;
+};
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
+int qce_check_status(struct qce_device *qce, u32 *status);
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+	      u32 offset);
+
+#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
new file mode 100644
index 0000000..33ae354
--- /dev/null
+++ b/drivers/crypto/qce/core.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+
+#include "core.h"
+#include "cipher.h"
+#include "sha.h"
+
+#define QCE_MAJOR_VERSION5	0x05
+#define QCE_QUEUE_LENGTH	1
+
+static const struct qce_algo_ops *qce_ops[] = {
+	&ablkcipher_ops,
+	&ahash_ops,
+};
+
+static void qce_unregister_algs(struct qce_device *qce)
+{
+	const struct qce_algo_ops *ops;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+		ops = qce_ops[i];
+		ops->unregister_algs(qce);
+	}
+}
+
+static int qce_register_algs(struct qce_device *qce)
+{
+	const struct qce_algo_ops *ops;
+	int i, ret = -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+		ops = qce_ops[i];
+		ret = ops->register_algs(qce);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int qce_handle_request(struct crypto_async_request *async_req)
+{
+	int ret = -EINVAL, i;
+	const struct qce_algo_ops *ops;
+	u32 type = crypto_tfm_alg_type(async_req->tfm);
+
+	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+		ops = qce_ops[i];
+		if (type != ops->type)
+			continue;
+		ret = ops->async_req_handle(async_req);
+		break;
+	}
+
+	return ret;
+}
+
+static int qce_handle_queue(struct qce_device *qce,
+			    struct crypto_async_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	unsigned long flags;
+	int ret = 0, err;
+
+	spin_lock_irqsave(&qce->lock, flags);
+
+	if (req)
+		ret = crypto_enqueue_request(&qce->queue, req);
+
+	/* busy, do not dequeue request */
+	if (qce->req) {
+		spin_unlock_irqrestore(&qce->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&qce->queue);
+	async_req = crypto_dequeue_request(&qce->queue);
+	if (async_req)
+		qce->req = async_req;
+
+	spin_unlock_irqrestore(&qce->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog) {
+		spin_lock_bh(&qce->lock);
+		backlog->complete(backlog, -EINPROGRESS);
+		spin_unlock_bh(&qce->lock);
+	}
+
+	err = qce_handle_request(async_req);
+	if (err) {
+		qce->result = err;
+		tasklet_schedule(&qce->done_tasklet);
+	}
+
+	return ret;
+}
+
+static void qce_tasklet_req_done(unsigned long data)
+{
+	struct qce_device *qce = (struct qce_device *)data;
+	struct crypto_async_request *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qce->lock, flags);
+	req = qce->req;
+	qce->req = NULL;
+	spin_unlock_irqrestore(&qce->lock, flags);
+
+	if (req)
+		req->complete(req, qce->result);
+
+	qce_handle_queue(qce, NULL);
+}
+
+static int qce_async_request_enqueue(struct qce_device *qce,
+				     struct crypto_async_request *req)
+{
+	return qce_handle_queue(qce, req);
+}
+
+static void qce_async_request_done(struct qce_device *qce, int ret)
+{
+	qce->result = ret;
+	tasklet_schedule(&qce->done_tasklet);
+}
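+
+/*
+ * Rough request life cycle (a simplified reading of the code above, for
+ * orientation only): qce_handle_queue() keeps a single request in flight
+ * (QCE_QUEUE_LENGTH is 1) and dispatches it via qce_handle_request() to
+ * the qce_algo_ops handler whose ->type matches.  On completion the
+ * backend calls qce->async_req_done(), and on a failed start the queue
+ * code schedules the tasklet itself; in both cases qce_tasklet_req_done()
+ * completes the request and pulls the next one off the queue.
+ */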
+
+static int qce_check_version(struct qce_device *qce)
+{
+	u32 major, minor, step;
+
+	qce_get_version(qce, &major, &minor, &step);
+
+	/*
+	 * The driver does not support v5 with minor 0, because that revision
+	 * has special alignment requirements.
+	 */
+	if (major != QCE_MAJOR_VERSION5 || minor == 0)
+		return -ENODEV;
+
+	qce->burst_size = QCE_BAM_BURST_SIZE;
+	qce->pipe_pair_id = 1;
+
+	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
+		major, minor, step);
+
+	return 0;
+}
+
+static int qce_crypto_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qce_device *qce;
+	struct resource *res;
+	int ret;
+
+	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
+	if (!qce)
+		return -ENOMEM;
+
+	qce->dev = dev;
+	platform_set_drvdata(pdev, qce);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	qce->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(qce->base))
+		return PTR_ERR(qce->base);
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret < 0)
+		return ret;
+
+	qce->core = devm_clk_get(qce->dev, "core");
+	if (IS_ERR(qce->core))
+		return PTR_ERR(qce->core);
+
+	qce->iface = devm_clk_get(qce->dev, "iface");
+	if (IS_ERR(qce->iface))
+		return PTR_ERR(qce->iface);
+
+	qce->bus = devm_clk_get(qce->dev, "bus");
+	if (IS_ERR(qce->bus))
+		return PTR_ERR(qce->bus);
+
+	ret = clk_prepare_enable(qce->core);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(qce->iface);
+	if (ret)
+		goto err_clks_core;
+
+	ret = clk_prepare_enable(qce->bus);
+	if (ret)
+		goto err_clks_iface;
+
+	ret = qce_dma_request(qce->dev, &qce->dma);
+	if (ret)
+		goto err_clks;
+
+	ret = qce_check_version(qce);
+	if (ret)
+		goto err_clks;
+
+	spin_lock_init(&qce->lock);
+	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
+		     (unsigned long)qce);
+	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
+
+	qce->async_req_enqueue = qce_async_request_enqueue;
+	qce->async_req_done = qce_async_request_done;
+
+	ret = qce_register_algs(qce);
+	if (ret)
+		goto err_dma;
+
+	return 0;
+
+err_dma:
+	qce_dma_release(&qce->dma);
+err_clks:
+	clk_disable_unprepare(qce->bus);
+err_clks_iface:
+	clk_disable_unprepare(qce->iface);
+err_clks_core:
+	clk_disable_unprepare(qce->core);
+	return ret;
+}
+
+static int qce_crypto_remove(struct platform_device *pdev)
+{
+	struct qce_device *qce = platform_get_drvdata(pdev);
+
+	tasklet_kill(&qce->done_tasklet);
+	qce_unregister_algs(qce);
+	qce_dma_release(&qce->dma);
+	clk_disable_unprepare(qce->bus);
+	clk_disable_unprepare(qce->iface);
+	clk_disable_unprepare(qce->core);
+	return 0;
+}
+
+static const struct of_device_id qce_crypto_of_match[] = {
+	{ .compatible = "qcom,crypto-v5.1", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
+
+static struct platform_driver qce_crypto_driver = {
+	.probe = qce_crypto_probe,
+	.remove = qce_crypto_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = KBUILD_MODNAME,
+		.of_match_table = qce_crypto_of_match,
+	},
+};
+module_platform_driver(qce_crypto_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm crypto engine driver");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
new file mode 100644
index 0000000..549965d
--- /dev/null
+++ b/drivers/crypto/qce/core.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include "dma.h"
+
+/**
+ * struct qce_device - crypto engine device structure
+ * @queue: crypto request queue
+ * @lock: protects @queue and @req
+ * @done_tasklet: done tasklet object
+ * @req: current active request
+ * @result: result of current transform
+ * @base: virtual IO base
+ * @dev: pointer to device structure
+ * @core: core device clock
+ * @iface: interface clock
+ * @bus: bus clock
+ * @dma: pointer to dma data
+ * @burst_size: the crypto burst size
+ * @pipe_pair_id: which pipe pair id the device is using
+ * @async_req_enqueue: invoked by every algorithm to enqueue a request
+ * @async_req_done: invoked by every algorithm to finish its request
+ */
+struct qce_device {
+	struct crypto_queue queue;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+	struct crypto_async_request *req;
+	int result;
+	void __iomem *base;
+	struct device *dev;
+	struct clk *core, *iface, *bus;
+	struct qce_dma_data dma;
+	int burst_size;
+	unsigned int pipe_pair_id;
+	int (*async_req_enqueue)(struct qce_device *qce,
+				 struct crypto_async_request *req);
+	void (*async_req_done)(struct qce_device *qce, int ret);
+};
+
+/**
+ * struct qce_algo_ops - algorithm operations per crypto type
+ * @type: should be CRYPTO_ALG_TYPE_XXX
+ * @register_algs: invoked by core to register the algorithms
+ * @unregister_algs: invoked by core to unregister the algorithms
+ * @async_req_handle: invoked by core to handle enqueued request
+ */
+struct qce_algo_ops {
+	u32 type;
+	int (*register_algs)(struct qce_device *qce);
+	void (*unregister_algs)(struct qce_device *qce);
+	int (*async_req_handle)(struct crypto_async_request *async_req);
+};
+
+#endif /* _CORE_H_ */
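
To illustrate the contract above, a minimal hypothetical backend (all names here are invented; the real instances are ablkcipher_ops and ahash_ops) would look like:

	static int example_register(struct qce_device *qce)
	{
		/* register the crypto algorithms backed by this engine */
		return 0;
	}

	static void example_unregister(struct qce_device *qce)
	{
		/* undo whatever example_register() did */
	}

	static int example_req_handle(struct crypto_async_request *async_req)
	{
		/* map buffers, program the engine, start the DMA transfer */
		return -ENOSYS;
	}

	const struct qce_algo_ops example_ops = {
		.type			= CRYPTO_ALG_TYPE_AHASH,
		.register_algs		= example_register,
		.unregister_algs	= example_unregister,
		.async_req_handle	= example_req_handle,
	};
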
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
new file mode 100644
index 0000000..0fb21e1
--- /dev/null
+++ b/drivers/crypto/qce/dma.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <crypto/scatterwalk.h>
+
+#include "dma.h"
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+{
+	int ret;
+
+	dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+	if (IS_ERR(dma->txchan))
+		return PTR_ERR(dma->txchan);
+
+	dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+	if (IS_ERR(dma->rxchan)) {
+		ret = PTR_ERR(dma->rxchan);
+		goto error_rx;
+	}
+
+	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
+				  GFP_KERNEL);
+	if (!dma->result_buf) {
+		ret = -ENOMEM;
+		goto error_nomem;
+	}
+
+	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
+
+	return 0;
+error_nomem:
+	dma_release_channel(dma->rxchan);
+error_rx:
+	dma_release_channel(dma->txchan);
+	return ret;
+}
+
+void qce_dma_release(struct qce_dma_data *dma)
+{
+	dma_release_channel(dma->txchan);
+	dma_release_channel(dma->rxchan);
+	kfree(dma->result_buf);
+}
+
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+	      enum dma_data_direction dir, bool chained)
+{
+	int err;
+
+	if (chained) {
+		while (sg) {
+			err = dma_map_sg(dev, sg, 1, dir);
+			if (!err)
+				return -EFAULT;
+			sg = scatterwalk_sg_next(sg);
+		}
+	} else {
+		err = dma_map_sg(dev, sg, nents, dir);
+		if (!err)
+			return -EFAULT;
+	}
+
+	return nents;
+}
+
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+		 enum dma_data_direction dir, bool chained)
+{
+	if (chained)
+		while (sg) {
+			dma_unmap_sg(dev, sg, 1, dir);
+			sg = scatterwalk_sg_next(sg);
+		}
+	else
+		dma_unmap_sg(dev, sg, nents, dir);
+}
+
+int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
+{
+	struct scatterlist *sg = sglist;
+	int nents = 0;
+
+	if (chained)
+		*chained = false;
+
+	while (nbytes > 0 && sg) {
+		nents++;
+		nbytes -= sg->length;
+		if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
+			*chained = true;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	return nents;
+}
+
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+{
+	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+
+	while (sg) {
+		if (!sg_page(sg))
+			break;
+		sg = sg_next(sg);
+	}
+
+	if (!sg)
+		return ERR_PTR(-EINVAL);
+
+	while (new_sgl && sg) {
+		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
+			    new_sgl->offset);
+		sg_last = sg;
+		sg = sg_next(sg);
+		new_sgl = sg_next(new_sgl);
+	}
+
+	return sg_last;
+}
+
+static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
+			   int nents, unsigned long flags,
+			   enum dma_transfer_direction dir,
+			   dma_async_tx_callback cb, void *cb_param)
+{
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+
+	if (!sg || !nents)
+		return -EINVAL;
+
+	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
+	if (!desc)
+		return -EINVAL;
+
+	desc->callback = cb;
+	desc->callback_param = cb_param;
+	cookie = dmaengine_submit(desc);
+
+	return dma_submit_error(cookie);
+}
+
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
+		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
+		     dma_async_tx_callback cb, void *cb_param)
+{
+	struct dma_chan *rxchan = dma->rxchan;
+	struct dma_chan *txchan = dma->txchan;
+	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	int ret;
+
+	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
+			     NULL, NULL);
+	if (ret)
+		return ret;
+
+	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
+			       cb, cb_param);
+}
+
+void qce_dma_issue_pending(struct qce_dma_data *dma)
+{
+	dma_async_issue_pending(dma->rxchan);
+	dma_async_issue_pending(dma->txchan);
+}
+
+int qce_dma_terminate_all(struct qce_dma_data *dma)
+{
+	int ret;
+
+	ret = dmaengine_terminate_all(dma->rxchan);
+	return ret ?: dmaengine_terminate_all(dma->txchan);
+}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
new file mode 100644
index 0000000..805e378
--- /dev/null
+++ b/drivers/crypto/qce/dma.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DMA_H_
+#define _DMA_H_
+
+/* maximum data transfer block size between BAM and CE */
+#define QCE_BAM_BURST_SIZE		64
+
+#define QCE_AUTHIV_REGS_CNT		16
+#define QCE_AUTH_BYTECOUNT_REGS_CNT	4
+#define QCE_CNTRIV_REGS_CNT		4
+
+struct qce_result_dump {
+	u32 auth_iv[QCE_AUTHIV_REGS_CNT];
+	u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
+	u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
+	u32 status;
+	u32 status2;
+};
+
+#define QCE_IGNORE_BUF_SZ	(2 * QCE_BAM_BURST_SIZE)
+#define QCE_RESULT_BUF_SZ	\
+		ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
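+
+/*
+ * Worked size check (illustrative): struct qce_result_dump packs
+ * (16 + 4 + 4 + 2) u32 words = 104 bytes, so QCE_RESULT_BUF_SZ rounds up
+ * to ALIGN(104, 64) = 128 bytes.  Together with QCE_IGNORE_BUF_SZ (also
+ * 128) this gives the single 256-byte allocation in qce_dma_request().
+ */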
+
+struct qce_dma_data {
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+	struct qce_result_dump *result_buf;
+	void *ignore_buf;
+};
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
+void qce_dma_release(struct qce_dma_data *dma);
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
+		     int in_ents, struct scatterlist *sg_out, int out_ents,
+		     dma_async_tx_callback cb, void *cb_param);
+void qce_dma_issue_pending(struct qce_dma_data *dma);
+int qce_dma_terminate_all(struct qce_dma_data *dma);
+int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+		 enum dma_data_direction dir, bool chained);
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+	      enum dma_data_direction dir, bool chained);
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+
+#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h
new file mode 100644
index 0000000..f0e19e3
--- /dev/null
+++ b/drivers/crypto/qce/regs-v5.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _REGS_V5_H_
+#define _REGS_V5_H_
+
+#include <linux/bitops.h>
+
+#define REG_VERSION			0x000
+#define REG_STATUS			0x100
+#define REG_STATUS2			0x104
+#define REG_ENGINES_AVAIL		0x108
+#define REG_FIFO_SIZES			0x10c
+#define REG_SEG_SIZE			0x110
+#define REG_GOPROC			0x120
+#define REG_ENCR_SEG_CFG		0x200
+#define REG_ENCR_SEG_SIZE		0x204
+#define REG_ENCR_SEG_START		0x208
+#define REG_CNTR0_IV0			0x20c
+#define REG_CNTR1_IV1			0x210
+#define REG_CNTR2_IV2			0x214
+#define REG_CNTR3_IV3			0x218
+#define REG_CNTR_MASK			0x21C
+#define REG_ENCR_CCM_INT_CNTR0		0x220
+#define REG_ENCR_CCM_INT_CNTR1		0x224
+#define REG_ENCR_CCM_INT_CNTR2		0x228
+#define REG_ENCR_CCM_INT_CNTR3		0x22c
+#define REG_ENCR_XTS_DU_SIZE		0x230
+#define REG_CNTR_MASK2			0x234
+#define REG_CNTR_MASK1			0x238
+#define REG_CNTR_MASK0			0x23c
+#define REG_AUTH_SEG_CFG		0x300
+#define REG_AUTH_SEG_SIZE		0x304
+#define REG_AUTH_SEG_START		0x308
+#define REG_AUTH_IV0			0x310
+#define REG_AUTH_IV1			0x314
+#define REG_AUTH_IV2			0x318
+#define REG_AUTH_IV3			0x31c
+#define REG_AUTH_IV4			0x320
+#define REG_AUTH_IV5			0x324
+#define REG_AUTH_IV6			0x328
+#define REG_AUTH_IV7			0x32c
+#define REG_AUTH_IV8			0x330
+#define REG_AUTH_IV9			0x334
+#define REG_AUTH_IV10			0x338
+#define REG_AUTH_IV11			0x33c
+#define REG_AUTH_IV12			0x340
+#define REG_AUTH_IV13			0x344
+#define REG_AUTH_IV14			0x348
+#define REG_AUTH_IV15			0x34c
+#define REG_AUTH_INFO_NONCE0		0x350
+#define REG_AUTH_INFO_NONCE1		0x354
+#define REG_AUTH_INFO_NONCE2		0x358
+#define REG_AUTH_INFO_NONCE3		0x35c
+#define REG_AUTH_BYTECNT0		0x390
+#define REG_AUTH_BYTECNT1		0x394
+#define REG_AUTH_BYTECNT2		0x398
+#define REG_AUTH_BYTECNT3		0x39c
+#define REG_AUTH_EXP_MAC0		0x3a0
+#define REG_AUTH_EXP_MAC1		0x3a4
+#define REG_AUTH_EXP_MAC2		0x3a8
+#define REG_AUTH_EXP_MAC3		0x3ac
+#define REG_AUTH_EXP_MAC4		0x3b0
+#define REG_AUTH_EXP_MAC5		0x3b4
+#define REG_AUTH_EXP_MAC6		0x3b8
+#define REG_AUTH_EXP_MAC7		0x3bc
+#define REG_CONFIG			0x400
+#define REG_GOPROC_QC_KEY		0x1000
+#define REG_GOPROC_OEM_KEY		0x2000
+#define REG_ENCR_KEY0			0x3000
+#define REG_ENCR_KEY1			0x3004
+#define REG_ENCR_KEY2			0x3008
+#define REG_ENCR_KEY3			0x300c
+#define REG_ENCR_KEY4			0x3010
+#define REG_ENCR_KEY5			0x3014
+#define REG_ENCR_KEY6			0x3018
+#define REG_ENCR_KEY7			0x301c
+#define REG_ENCR_XTS_KEY0		0x3020
+#define REG_ENCR_XTS_KEY1		0x3024
+#define REG_ENCR_XTS_KEY2		0x3028
+#define REG_ENCR_XTS_KEY3		0x302c
+#define REG_ENCR_XTS_KEY4		0x3030
+#define REG_ENCR_XTS_KEY5		0x3034
+#define REG_ENCR_XTS_KEY6		0x3038
+#define REG_ENCR_XTS_KEY7		0x303c
+#define REG_AUTH_KEY0			0x3040
+#define REG_AUTH_KEY1			0x3044
+#define REG_AUTH_KEY2			0x3048
+#define REG_AUTH_KEY3			0x304c
+#define REG_AUTH_KEY4			0x3050
+#define REG_AUTH_KEY5			0x3054
+#define REG_AUTH_KEY6			0x3058
+#define REG_AUTH_KEY7			0x305c
+#define REG_AUTH_KEY8			0x3060
+#define REG_AUTH_KEY9			0x3064
+#define REG_AUTH_KEY10			0x3068
+#define REG_AUTH_KEY11			0x306c
+#define REG_AUTH_KEY12			0x3070
+#define REG_AUTH_KEY13			0x3074
+#define REG_AUTH_KEY14			0x3078
+#define REG_AUTH_KEY15			0x307c
+
+/* Register bits - REG_VERSION */
+#define CORE_STEP_REV_SHIFT		0
+#define CORE_STEP_REV_MASK		GENMASK(15, 0)
+#define CORE_MINOR_REV_SHIFT		16
+#define CORE_MINOR_REV_MASK		GENMASK(23, 16)
+#define CORE_MAJOR_REV_SHIFT		24
+#define CORE_MAJOR_REV_MASK		GENMASK(31, 24)
+
+/* Register bits - REG_STATUS */
+#define MAC_FAILED_SHIFT		31
+#define DOUT_SIZE_AVAIL_SHIFT		26
+#define DOUT_SIZE_AVAIL_MASK		GENMASK(30, 26)
+#define DIN_SIZE_AVAIL_SHIFT		21
+#define DIN_SIZE_AVAIL_MASK		GENMASK(25, 21)
+#define HSD_ERR_SHIFT			20
+#define ACCESS_VIOL_SHIFT		19
+#define PIPE_ACTIVE_ERR_SHIFT		18
+#define CFG_CHNG_ERR_SHIFT		17
+#define DOUT_ERR_SHIFT			16
+#define DIN_ERR_SHIFT			15
+#define AXI_ERR_SHIFT			14
+#define CRYPTO_STATE_SHIFT		10
+#define CRYPTO_STATE_MASK		GENMASK(13, 10)
+#define ENCR_BUSY_SHIFT			9
+#define AUTH_BUSY_SHIFT			8
+#define DOUT_INTR_SHIFT			7
+#define DIN_INTR_SHIFT			6
+#define OP_DONE_INTR_SHIFT		5
+#define ERR_INTR_SHIFT			4
+#define DOUT_RDY_SHIFT			3
+#define DIN_RDY_SHIFT			2
+#define OPERATION_DONE_SHIFT		1
+#define SW_ERR_SHIFT			0
+
+/* Register bits - REG_STATUS2 */
+#define AXI_EXTRA_SHIFT			1
+#define LOCKED_SHIFT			2
+
+/* Register bits - REG_CONFIG */
+#define REQ_SIZE_SHIFT			17
+#define REQ_SIZE_MASK			GENMASK(20, 17)
+#define REQ_SIZE_ENUM_1_BEAT		0
+#define REQ_SIZE_ENUM_2_BEAT		1
+#define REQ_SIZE_ENUM_3_BEAT		2
+#define REQ_SIZE_ENUM_4_BEAT		3
+#define REQ_SIZE_ENUM_5_BEAT		4
+#define REQ_SIZE_ENUM_6_BEAT		5
+#define REQ_SIZE_ENUM_7_BEAT		6
+#define REQ_SIZE_ENUM_8_BEAT		7
+#define REQ_SIZE_ENUM_9_BEAT		8
+#define REQ_SIZE_ENUM_10_BEAT		9
+#define REQ_SIZE_ENUM_11_BEAT		10
+#define REQ_SIZE_ENUM_12_BEAT		11
+#define REQ_SIZE_ENUM_13_BEAT		12
+#define REQ_SIZE_ENUM_14_BEAT		13
+#define REQ_SIZE_ENUM_15_BEAT		14
+#define REQ_SIZE_ENUM_16_BEAT		15
+
+#define MAX_QUEUED_REQ_SHIFT		14
+#define MAX_QUEUED_REQ_MASK		GENMASK(24, 16)
+#define ENUM_1_QUEUED_REQS		0
+#define ENUM_2_QUEUED_REQS		1
+#define ENUM_3_QUEUED_REQS		2
+
+#define IRQ_ENABLES_SHIFT		10
+#define IRQ_ENABLES_MASK		GENMASK(13, 10)
+
+#define LITTLE_ENDIAN_MODE_SHIFT	9
+#define PIPE_SET_SELECT_SHIFT		5
+#define PIPE_SET_SELECT_MASK		GENMASK(8, 5)
+
+#define HIGH_SPD_EN_N_SHIFT		4
+#define MASK_DOUT_INTR_SHIFT		3
+#define MASK_DIN_INTR_SHIFT		2
+#define MASK_OP_DONE_INTR_SHIFT		1
+#define MASK_ERR_INTR_SHIFT		0
+
+/* Register bits - REG_AUTH_SEG_CFG */
+#define COMP_EXP_MAC_SHIFT		24
+#define COMP_EXP_MAC_DISABLED		0
+#define COMP_EXP_MAC_ENABLED		1
+
+#define F9_DIRECTION_SHIFT		23
+#define F9_DIRECTION_UPLINK		0
+#define F9_DIRECTION_DOWNLINK		1
+
+#define AUTH_NONCE_NUM_WORDS_SHIFT	20
+#define AUTH_NONCE_NUM_WORDS_MASK	GENMASK(22, 20)
+
+#define USE_PIPE_KEY_AUTH_SHIFT		19
+#define USE_HW_KEY_AUTH_SHIFT		18
+#define AUTH_FIRST_SHIFT		17
+#define AUTH_LAST_SHIFT			16
+
+#define AUTH_POS_SHIFT			14
+#define AUTH_POS_MASK			GENMASK(15, 14)
+#define AUTH_POS_BEFORE			0
+#define AUTH_POS_AFTER			1
+
+#define AUTH_SIZE_SHIFT			9
+#define AUTH_SIZE_MASK			GENMASK(13, 9)
+#define AUTH_SIZE_SHA1			0
+#define AUTH_SIZE_SHA256		1
+#define AUTH_SIZE_ENUM_1_BYTES		0
+#define AUTH_SIZE_ENUM_2_BYTES		1
+#define AUTH_SIZE_ENUM_3_BYTES		2
+#define AUTH_SIZE_ENUM_4_BYTES		3
+#define AUTH_SIZE_ENUM_5_BYTES		4
+#define AUTH_SIZE_ENUM_6_BYTES		5
+#define AUTH_SIZE_ENUM_7_BYTES		6
+#define AUTH_SIZE_ENUM_8_BYTES		7
+#define AUTH_SIZE_ENUM_9_BYTES		8
+#define AUTH_SIZE_ENUM_10_BYTES		9
+#define AUTH_SIZE_ENUM_11_BYTES		10
+#define AUTH_SIZE_ENUM_12_BYTES		11
+#define AUTH_SIZE_ENUM_13_BYTES		12
+#define AUTH_SIZE_ENUM_14_BYTES		13
+#define AUTH_SIZE_ENUM_15_BYTES		14
+#define AUTH_SIZE_ENUM_16_BYTES		15
+
+#define AUTH_MODE_SHIFT			6
+#define AUTH_MODE_MASK			GENMASK(8, 6)
+#define AUTH_MODE_HASH			0
+#define AUTH_MODE_HMAC			1
+#define AUTH_MODE_CCM			0
+#define AUTH_MODE_CMAC			1
+
+#define AUTH_KEY_SIZE_SHIFT		3
+#define AUTH_KEY_SIZE_MASK		GENMASK(5, 3)
+#define AUTH_KEY_SZ_AES128		0
+#define AUTH_KEY_SZ_AES256		2
+
+#define AUTH_ALG_SHIFT			0
+#define AUTH_ALG_MASK			GENMASK(2, 0)
+#define AUTH_ALG_NONE			0
+#define AUTH_ALG_SHA			1
+#define AUTH_ALG_AES			2
+#define AUTH_ALG_KASUMI			3
+#define AUTH_ALG_SNOW3G			4
+#define AUTH_ALG_ZUC			5
+
+/* Register bits - REG_ENCR_XTS_DU_SIZE */
+#define ENCR_XTS_DU_SIZE_SHIFT		0
+#define ENCR_XTS_DU_SIZE_MASK		GENMASK(19, 0)
+
+/* Register bits - REG_ENCR_SEG_CFG */
+#define F8_KEYSTREAM_ENABLE_SHIFT	17
+#define F8_KEYSTREAM_DISABLED		0
+#define F8_KEYSTREAM_ENABLED		1
+
+#define F8_DIRECTION_SHIFT		16
+#define F8_DIRECTION_UPLINK		0
+#define F8_DIRECTION_DOWNLINK		1
+
+#define USE_PIPE_KEY_ENCR_SHIFT		15
+#define USE_PIPE_KEY_ENCR_ENABLED	1
+#define USE_KEY_REGISTERS		0
+
+#define USE_HW_KEY_ENCR_SHIFT		14
+#define USE_KEY_REG			0
+#define USE_HW_KEY			1
+
+#define LAST_CCM_SHIFT			13
+#define LAST_CCM_XFR			1
+#define INTERM_CCM_XFR			0
+
+#define CNTR_ALG_SHIFT			11
+#define CNTR_ALG_MASK			GENMASK(12, 11)
+#define CNTR_ALG_NIST			0
+
+#define ENCODE_SHIFT			10
+
+#define ENCR_MODE_SHIFT			6
+#define ENCR_MODE_MASK			GENMASK(9, 6)
+#define ENCR_MODE_ECB			0
+#define ENCR_MODE_CBC			1
+#define ENCR_MODE_CTR			2
+#define ENCR_MODE_XTS			3
+#define ENCR_MODE_CCM			4
+
+#define ENCR_KEY_SZ_SHIFT		3
+#define ENCR_KEY_SZ_MASK		GENMASK(5, 3)
+#define ENCR_KEY_SZ_DES			0
+#define ENCR_KEY_SZ_3DES		1
+#define ENCR_KEY_SZ_AES128		0
+#define ENCR_KEY_SZ_AES256		2
+
+#define ENCR_ALG_SHIFT			0
+#define ENCR_ALG_MASK			GENMASK(2, 0)
+#define ENCR_ALG_NONE			0
+#define ENCR_ALG_DES			1
+#define ENCR_ALG_AES			2
+#define ENCR_ALG_KASUMI			4
+#define ENCR_ALG_SNOW_3G		5
+#define ENCR_ALG_ZUC			6
+
+/* Register bits - REG_GOPROC */
+#define GO_SHIFT			0
+#define CLR_CNTXT_SHIFT			1
+#define RESULTS_DUMP_SHIFT		2
+
+/* Register bits - REG_ENGINES_AVAIL */
+#define ENCR_AES_SEL_SHIFT		0
+#define DES_SEL_SHIFT			1
+#define ENCR_SNOW3G_SEL_SHIFT		2
+#define ENCR_KASUMI_SEL_SHIFT		3
+#define SHA_SEL_SHIFT			4
+#define SHA512_SEL_SHIFT		5
+#define AUTH_AES_SEL_SHIFT		6
+#define AUTH_SNOW3G_SEL_SHIFT		7
+#define AUTH_KASUMI_SEL_SHIFT		8
+#define BAM_PIPE_SETS_SHIFT		9
+#define BAM_PIPE_SETS_MASK		GENMASK(12, 9)
+#define AXI_WR_BEATS_SHIFT		13
+#define AXI_WR_BEATS_MASK		GENMASK(18, 13)
+#define AXI_RD_BEATS_SHIFT		19
+#define AXI_RD_BEATS_MASK		GENMASK(24, 19)
+#define ENCR_ZUC_SEL_SHIFT		26
+#define AUTH_ZUC_SEL_SHIFT		27
+#define ZUC_ENABLE_SHIFT		28
+
+#endif /* _REGS_V5_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644
index 0000000..f338593
--- /dev/null
+++ b/drivers/crypto/qce/sha.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <crypto/internal/hash.h>
+
+#include "common.h"
+#include "core.h"
+#include "sha.h"
+
+/* crypto hw padding constant for first operation */
+#define SHA_PADDING		64
+#define SHA_PADDING_MASK	(SHA_PADDING - 1)
+
+static LIST_HEAD(ahash_algs);
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static void qce_ahash_done(void *data)
+{
+	struct crypto_async_request *async_req = data;
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	struct qce_result_dump *result = qce->dma.result_buf;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	int error;
+	u32 status;
+
+	error = qce_dma_terminate_all(&qce->dma);
+	if (error)
+		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
+
+	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+		    rctx->src_chained);
+	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+
+	memcpy(rctx->digest, result->auth_iv, digestsize);
+	if (req->result)
+		memcpy(req->result, result->auth_iv, digestsize);
+
+	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
+	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
+
+	error = qce_check_status(qce, &status);
+	if (error < 0)
+		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
+
+	req->src = rctx->src_orig;
+	req->nbytes = rctx->nbytes_orig;
+	rctx->last_blk = false;
+	rctx->first_blk = false;
+
+	qce->async_req_done(tmpl->qce, error);
+}
+
+static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	unsigned long flags = rctx->flags;
+	int ret;
+
+	if (IS_SHA_HMAC(flags)) {
+		rctx->authkey = ctx->authkey;
+		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
+	} else if (IS_CMAC(flags)) {
+		rctx->authkey = ctx->authkey;
+		rctx->authklen = AES_KEYSIZE_128;
+	}
+
+	rctx->src_nents = qce_countsg(req->src, req->nbytes,
+				      &rctx->src_chained);
+	ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+			rctx->src_chained);
+	if (ret < 0)
+		return ret;
+
+	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+	ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+	if (ret < 0)
+		goto error_unmap_src;
+
+	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
+			       &rctx->result_sg, 1, qce_ahash_done, async_req);
+	if (ret)
+		goto error_unmap_dst;
+
+	qce_dma_issue_pending(&qce->dma);
+
+	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+	if (ret)
+		goto error_terminate;
+
+	return 0;
+
+error_terminate:
+	qce_dma_terminate_all(&qce->dma);
+error_unmap_dst:
+	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+error_unmap_src:
+	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+		    rctx->src_chained);
+	return ret;
+}
+
+static int qce_ahash_init(struct ahash_request *req)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	const u32 *std_iv = tmpl->std_iv;
+
+	memset(rctx, 0, sizeof(*rctx));
+	rctx->first_blk = true;
+	rctx->last_blk = false;
+	rctx->flags = tmpl->alg_flags;
+	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
+
+	return 0;
+}
+
+static int qce_ahash_export(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned long flags = rctx->flags;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+
+	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+		struct sha1_state *out_state = out;
+
+		out_state->count = rctx->count;
+		qce_cpu_to_be32p_array((__be32 *)out_state->state,
+				       rctx->digest, digestsize);
+		memcpy(out_state->buffer, rctx->buf, blocksize);
+	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+		struct sha256_state *out_state = out;
+
+		out_state->count = rctx->count;
+		qce_cpu_to_be32p_array((__be32 *)out_state->state,
+				       rctx->digest, digestsize);
+		memcpy(out_state->buf, rctx->buf, blocksize);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qce_import_common(struct ahash_request *req, u64 in_count,
+			     const u32 *state, const u8 *buffer, bool hmac)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize;
+	u64 count = in_count;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+	rctx->count = in_count;
+	memcpy(rctx->buf, buffer, blocksize);
+
+	if (in_count <= blocksize) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware pads the data when the first block
+		 * is processed. Therefore the byte_count must be incremented
+		 * by 64 after the first block operation.
+		 */
+		if (hmac)
+			count += SHA_PADDING;
+	}
+
+	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
+	rctx->byte_count[1] = (__force __be32)(count >> 32);
+	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
+			       digestsize);
+	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+
+	return 0;
+}
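+
+/*
+ * Worked example (numbers invented): importing a hmac(sha1) state with
+ * in_count == 100 and blocksize == 64 takes the !first_blk path, so
+ * count = 100 + SHA_PADDING = 164, byte_count[0] = 164 & ~63 = 128 (a
+ * block multiple), and buflen = 100 & 63 = 36 bytes are carried over to
+ * the next update.
+ */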
+
+static int qce_ahash_import(struct ahash_request *req, const void *in)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned long flags = rctx->flags;
+	bool hmac = IS_SHA_HMAC(flags);
+	int ret = -EINVAL;
+
+	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+		const struct sha1_state *state = in;
+
+		ret = qce_import_common(req, state->count, state->state,
+					state->buffer, hmac);
+	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+		const struct sha256_state *state = in;
+
+		ret = qce_import_common(req, state->count, state->state,
+					state->buf, hmac);
+	}
+
+	return ret;
+}
+
+static int qce_ahash_update(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	struct qce_device *qce = tmpl->qce;
+	struct scatterlist *sg_last, *sg;
+	unsigned int total, len;
+	unsigned int hash_later;
+	unsigned int nbytes;
+	unsigned int blocksize;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	rctx->count += req->nbytes;
+
+	/* check for buffered data from previous updates and append it */
+	total = req->nbytes + rctx->buflen;
+
+	if (total <= blocksize) {
+		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
+					 0, req->nbytes, 0);
+		rctx->buflen += req->nbytes;
+		return 0;
+	}
+
+	/* save the original req structure fields */
+	rctx->src_orig = req->src;
+	rctx->nbytes_orig = req->nbytes;
+
+	/*
+	 * If we have data from a previous update, copy it to the temporary
+	 * buffer. The old data will be combined with the bytes of the current
+	 * request.
+	 */
+	if (rctx->buflen)
+		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+
+	/* calculate how many bytes will be hashed later */
+	hash_later = total % blocksize;
+	if (hash_later) {
+		unsigned int src_offset = req->nbytes - hash_later;
+		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
+					 hash_later, 0);
+	}
+
+	/* here nbytes is a multiple of blocksize */
+	nbytes = total - hash_later;
+
+	len = rctx->buflen;
+	sg = sg_last = req->src;
+
+	while (len < nbytes && sg) {
+		if (len + sg_dma_len(sg) > nbytes)
+			break;
+		len += sg_dma_len(sg);
+		sg_last = sg;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (!sg_last)
+		return -EINVAL;
+
+	sg_mark_end(sg_last);
+
+	if (rctx->buflen) {
+		sg_init_table(rctx->sg, 2);
+		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
+		scatterwalk_sg_chain(rctx->sg, 2, req->src);
+		req->src = rctx->sg;
+	}
+
+	req->nbytes = nbytes;
+	rctx->buflen = hash_later;
+
+	return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
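+
+/*
+ * Worked example (numbers invented): with rctx->buflen == 10 bytes left
+ * over and a new req->nbytes == 150 at blocksize 64, total = 160,
+ * hash_later = 160 % 64 = 32, and nbytes = 128.  The 10 buffered bytes
+ * are chained in front of the first 118 bytes of req->src, the last 32
+ * bytes are copied back into rctx->buf, and 128 bytes (two full blocks)
+ * go to the hardware.
+ */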
+
+static int qce_ahash_final(struct ahash_request *req)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	struct qce_device *qce = tmpl->qce;
+
+	if (!rctx->buflen)
+		return 0;
+
+	rctx->last_blk = true;
+
+	rctx->src_orig = req->src;
+	rctx->nbytes_orig = req->nbytes;
+
+	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);
+
+	req->src = rctx->sg;
+	req->nbytes = rctx->buflen;
+
+	return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_digest(struct ahash_request *req)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	struct qce_device *qce = tmpl->qce;
+	int ret;
+
+	ret = qce_ahash_init(req);
+	if (ret)
+		return ret;
+
+	rctx->src_orig = req->src;
+	rctx->nbytes_orig = req->nbytes;
+	rctx->first_blk = true;
+	rctx->last_blk = true;
+
+	return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+struct qce_ahash_result {
+	struct completion completion;
+	int error;
+};
+
+static void qce_digest_complete(struct crypto_async_request *req, int error)
+{
+	struct qce_ahash_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
+static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+	struct qce_ahash_result result;
+	struct ahash_request *req;
+	struct scatterlist sg;
+	unsigned int blocksize;
+	struct crypto_ahash *ahash_tfm;
+	u8 *buf;
+	int ret;
+	const char *alg_name;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	memset(ctx->authkey, 0, sizeof(ctx->authkey));
+
+	if (keylen <= blocksize) {
+		memcpy(ctx->authkey, key, keylen);
+		return 0;
+	}
+
+	if (digestsize == SHA1_DIGEST_SIZE)
+		alg_name = "sha1-qce";
+	else if (digestsize == SHA256_DIGEST_SIZE)
+		alg_name = "sha256-qce";
+	else
+		return -EINVAL;
+
+	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
+				       CRYPTO_ALG_TYPE_AHASH_MASK);
+	if (IS_ERR(ahash_tfm))
+		return PTR_ERR(ahash_tfm);
+
+	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto err_free_ahash;
+	}
+
+	init_completion(&result.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   qce_digest_complete, &result);
+	crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_free_req;
+	}
+
+	memcpy(buf, key, keylen);
+	sg_init_one(&sg, buf, keylen);
+	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
+
+	ret = crypto_ahash_digest(req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(&result.completion);
+		if (!ret)
+			ret = result.error;
+	}
+
+	if (ret)
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+	kfree(buf);
+err_free_req:
+	ahash_request_free(req);
+err_free_ahash:
+	crypto_free_ahash(ahash_tfm);
+	return ret;
+}
+
+static int qce_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
+	memset(ctx, 0, sizeof(*ctx));
+	return 0;
+}
+
+struct qce_ahash_def {
+	unsigned long flags;
+	const char *name;
+	const char *drv_name;
+	unsigned int digestsize;
+	unsigned int blocksize;
+	unsigned int statesize;
+	const u32 *std_iv;
+};
+
+static const struct qce_ahash_def ahash_def[] = {
+	{
+		.flags		= QCE_HASH_SHA1,
+		.name		= "sha1",
+		.drv_name	= "sha1-qce",
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.blocksize	= SHA1_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha1_state),
+		.std_iv		= std_iv_sha1,
+	},
+	{
+		.flags		= QCE_HASH_SHA256,
+		.name		= "sha256",
+		.drv_name	= "sha256-qce",
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.blocksize	= SHA256_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha256_state),
+		.std_iv		= std_iv_sha256,
+	},
+	{
+		.flags		= QCE_HASH_SHA1_HMAC,
+		.name		= "hmac(sha1)",
+		.drv_name	= "hmac-sha1-qce",
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.blocksize	= SHA1_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha1_state),
+		.std_iv		= std_iv_sha1,
+	},
+	{
+		.flags		= QCE_HASH_SHA256_HMAC,
+		.name		= "hmac(sha256)",
+		.drv_name	= "hmac-sha256-qce",
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.blocksize	= SHA256_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha256_state),
+		.std_iv		= std_iv_sha256,
+	},
+};
+
+static int qce_ahash_register_one(const struct qce_ahash_def *def,
+				  struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl;
+	struct ahash_alg *alg;
+	struct crypto_alg *base;
+	int ret;
+
+	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+	if (!tmpl)
+		return -ENOMEM;
+
+	tmpl->std_iv = def->std_iv;
+
+	alg = &tmpl->alg.ahash;
+	alg->init = qce_ahash_init;
+	alg->update = qce_ahash_update;
+	alg->final = qce_ahash_final;
+	alg->digest = qce_ahash_digest;
+	alg->export = qce_ahash_export;
+	alg->import = qce_ahash_import;
+	if (IS_SHA_HMAC(def->flags))
+		alg->setkey = qce_ahash_hmac_setkey;
+	alg->halg.digestsize = def->digestsize;
+	alg->halg.statesize = def->statesize;
+
+	base = &alg->halg.base;
+	base->cra_blocksize = def->blocksize;
+	base->cra_priority = 300;
+	base->cra_flags = CRYPTO_ALG_ASYNC;
+	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+	base->cra_alignmask = 0;
+	base->cra_module = THIS_MODULE;
+	base->cra_init = qce_ahash_cra_init;
+	INIT_LIST_HEAD(&base->cra_list);
+
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+
+	INIT_LIST_HEAD(&tmpl->entry);
+	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
+	tmpl->alg_flags = def->flags;
+	tmpl->qce = qce;
+
+	ret = crypto_register_ahash(alg);
+	if (ret) {
+		kfree(tmpl);
+		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+		return ret;
+	}
+
+	list_add_tail(&tmpl->entry, &ahash_algs);
+	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
+	return 0;
+}
+
+static void qce_ahash_unregister(struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl, *n;
+
+	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
+		crypto_unregister_ahash(&tmpl->alg.ahash);
+		list_del(&tmpl->entry);
+		kfree(tmpl);
+	}
+}
+
+static int qce_ahash_register(struct qce_device *qce)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
+		ret = qce_ahash_register_one(&ahash_def[i], qce);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+err:
+	qce_ahash_unregister(qce);
+	return ret;
+}
+
+const struct qce_algo_ops ahash_ops = {
+	.type = CRYPTO_ALG_TYPE_AHASH,
+	.register_algs = qce_ahash_register,
+	.unregister_algs = qce_ahash_unregister,
+	.async_req_handle = qce_ahash_async_req_handle,
+};
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
new file mode 100644
index 0000000..286f0d5
--- /dev/null
+++ b/drivers/crypto/qce/sha.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SHA_H_
+#define _SHA_H_
+
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_SHA_MAX_BLOCKSIZE		SHA256_BLOCK_SIZE
+#define QCE_SHA_MAX_DIGESTSIZE		SHA256_DIGEST_SIZE
+
+struct qce_sha_ctx {
+	u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
+};
+
+/**
+ * struct qce_sha_reqctx - holds private ahash objects per request
+ * @buf: used during update, import and export
+ * @tmpbuf: buffer for internal use
+ * @digest: calculated digest buffer
+ * @buflen: length of the buffer
+ * @flags: operation flags
+ * @src_orig: original request sg list
+ * @nbytes_orig: original request number of bytes
+ * @src_chained: is source scatterlist chained
+ * @src_nents: source number of entries
+ * @byte_count: byte count
+ * @count: save count in states during update, import and export
+ * @first_blk: is it the first block
+ * @last_blk: is it the last block
+ * @sg: used to chain sg lists
+ * @authkey: pointer to auth key in sha ctx
+ * @authklen: auth key length
+ * @result_sg: scatterlist used for result buffer
+ */
+struct qce_sha_reqctx {
+	u8 buf[QCE_SHA_MAX_BLOCKSIZE];
+	u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
+	u8 digest[QCE_SHA_MAX_DIGESTSIZE];
+	unsigned int buflen;
+	unsigned long flags;
+	struct scatterlist *src_orig;
+	unsigned int nbytes_orig;
+	bool src_chained;
+	int src_nents;
+	__be32 byte_count[2];
+	u64 count;
+	bool first_blk;
+	bool last_blk;
+	struct scatterlist sg[2];
+	u8 *authkey;
+	unsigned int authklen;
+	struct scatterlist result_sg;
+};
+
+static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+					     struct ahash_alg, halg);
+
+	return container_of(alg, struct qce_alg_template, alg.ahash);
+}
+
+extern const struct qce_algo_ops ahash_ops;
+
+#endif /* _SHA_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index a999f53..92105f3 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -190,7 +190,7 @@
 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
 {
 	struct cryp_ctx *ctx;
-	int i;
+	int count;
 	struct cryp_device_data *device_data;
 
 	if (param == NULL) {
@@ -215,12 +215,11 @@
 	if (cryp_pending_irq_src(device_data,
 				 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
 		if (ctx->outlen / ctx->blocksize > 0) {
-			for (i = 0; i < ctx->blocksize / 4; i++) {
-				*(ctx->outdata) = readl_relaxed(
-						&device_data->base->dout);
-				ctx->outdata += 4;
-				ctx->outlen -= 4;
-			}
+			count = ctx->blocksize / 4;
+
+			readsl(&device_data->base->dout, ctx->outdata, count);
+			ctx->outdata += count;
+			ctx->outlen -= count;
 
 			if (ctx->outlen == 0) {
 				cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@
 	} else if (cryp_pending_irq_src(device_data,
 					CRYP_IRQ_SRC_INPUT_FIFO)) {
 		if (ctx->datalen / ctx->blocksize > 0) {
-			for (i = 0 ; i < ctx->blocksize / 4; i++) {
-				writel_relaxed(ctx->indata,
-						&device_data->base->din);
-				ctx->indata += 4;
-				ctx->datalen -= 4;
-			}
+			count = ctx->blocksize / 4;
+
+			writesl(&device_data->base->din, ctx->indata, count);
+
+			ctx->indata += count;
+			ctx->datalen -= count;
 
 			if (ctx->datalen == 0)
 				cryp_disable_irq_src(device_data,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 878f090..e339c6b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -186,6 +186,13 @@
 	  Support for error detection and correction on the Intel
 	  3200 and 3210 server chipsets.
 
+config EDAC_IE31200
+	tristate "Intel e312xx"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  E3-1200 based DRAM controllers.
+
 config EDAC_X38
 	tristate "Intel X38"
 	depends on EDAC_MM_EDAC && PCI && X86
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 4154ed6..c479a24 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -37,6 +37,7 @@
 obj-$(CONFIG_EDAC_I82975X)		+= i82975x_edac.o
 obj-$(CONFIG_EDAC_I3000)		+= i3000_edac.o
 obj-$(CONFIG_EDAC_I3200)		+= i3200_edac.o
+obj-$(CONFIG_EDAC_IE31200)		+= ie31200_edac.o
 obj-$(CONFIG_EDAC_X38)			+= x38_edac.o
 obj-$(CONFIG_EDAC_I82860)		+= i82860_edac.o
 obj-$(CONFIG_EDAC_R82600)		+= r82600_edac.o
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index a66941f..e6d1691 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -28,7 +28,7 @@
 	if (ret)
 		return ret;
 
-	if (val < 0 || val > 4)
+	if (val > 4)
 		return -EINVAL;
 
 	return param_set_int(buf, kp);
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
new file mode 100644
index 0000000..a981dc6
--- /dev/null
+++ b/drivers/edac/ie31200_edac.c
@@ -0,0 +1,536 @@
+/*
+ * Intel E3-1200
+ * Copyright (C) 2014 Jason Baron <jbaron@akamai.com>
+ *
+ * Support for the E3-1200 processor family. Heavily based on previous
+ * Intel EDAC drivers.
+ *
+ * Since the DRAM controller is on the CPU chip, we can use its PCI device
+ * id to identify these processors.
+ *
+ * PCI DRAM controller device ids (Taken from The PCI ID Repository - http://pci-ids.ucw.cz/)
+ *
+ * 0108: Xeon E3-1200 Processor Family DRAM Controller
+ * 010c: Xeon E3-1200/2nd Generation Core Processor Family DRAM Controller
+ * 0150: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
+ * 0158: Xeon E3-1200 v2/Ivy Bridge DRAM Controller
+ * 015c: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
+ * 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
+ * 0c08: Xeon E3-1200 v3 Processor DRAM Controller
+ *
+ * Based on Intel specification:
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
+ *
+ * According to the above datasheet (p.16):
+ * "
+ * 6. Software must not access B0/D0/F0 32-bit memory-mapped registers with
+ * requests that cross a DW boundary.
+ * "
+ *
+ * Thus, we make use of the explicit lo_hi_readq(), which breaks the readq into
+ * 2 readl() calls. This restriction may be lifted in subsequent chip releases,
+ * but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
+ */
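+
+/*
+ * For reference (shown here only as a sketch; the real definition lives in
+ * asm-generic/io-64-nonatomic-lo-hi.h), lo_hi_readq() is roughly:
+ *
+ *	u32 low  = readl(addr);
+ *	u32 high = readl(addr + 4);
+ *	return low + ((u64)high << 32);
+ */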
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/edac.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include "edac_core.h"
+
+#define IE31200_REVISION "1.0"
+#define EDAC_MOD_STR "ie31200_edac"
+
+#define ie31200_printk(level, fmt, arg...) \
+	edac_printk(level, "ie31200", fmt, ##arg)
+
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+
+#define IE31200_DIMMS			4
+#define IE31200_RANKS			8
+#define IE31200_RANKS_PER_CHANNEL	4
+#define IE31200_DIMMS_PER_CHANNEL	2
+#define IE31200_CHANNELS		2
+
+/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
+#define IE31200_MCHBAR_LOW		0x48
+#define IE31200_MCHBAR_HIGH		0x4c
+#define IE31200_MCHBAR_MASK		GENMASK_ULL(38, 15)
+#define IE31200_MMR_WINDOW_SIZE		BIT(15)
+
+/*
+ * Error Status Register (16b)
+ *
+ * 15    reserved
+ * 14    Isochronous TBWRR Run Behind FIFO Full
+ *       (ITCV)
+ * 13    Isochronous TBWRR Run Behind FIFO Put
+ *       (ITSTV)
+ * 12    reserved
+ * 11    MCH Thermal Sensor Event
+ *       for SMI/SCI/SERR (GTSE)
+ * 10    reserved
+ *  9    LOCK to non-DRAM Memory Flag (LCKF)
+ *  8    reserved
+ *  7    DRAM Throttle Flag (DTF)
+ *  6:2  reserved
+ *  1    Multi-bit DRAM ECC Error Flag (DMERR)
+ *  0    Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define IE31200_ERRSTS			0xc8
+#define IE31200_ERRSTS_UE		BIT(1)
+#define IE31200_ERRSTS_CE		BIT(0)
+#define IE31200_ERRSTS_BITS		(IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
+
+/*
+ * Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+ * 15: 2 reserved
+ *    1  Multiple Bit Error Status (MERRSTS)
+ *    0  Correctable Error Status (CERRSTS)
+ */
+#define IE31200_C0ECCERRLOG			0x40c8
+#define IE31200_C1ECCERRLOG			0x44c8
+#define IE31200_ECCERRLOG_CE			BIT(0)
+#define IE31200_ECCERRLOG_UE			BIT(1)
+#define IE31200_ECCERRLOG_RANK_BITS		GENMASK_ULL(28, 27)
+#define IE31200_ECCERRLOG_RANK_SHIFT		27
+#define IE31200_ECCERRLOG_SYNDROME_BITS		GENMASK_ULL(23, 16)
+#define IE31200_ECCERRLOG_SYNDROME_SHIFT	16
+
+#define IE31200_ECCERRLOG_SYNDROME(log)		   \
+	((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
+	 IE31200_ECCERRLOG_SYNDROME_SHIFT)
+
+#define IE31200_CAPID0			0xe4
+#define IE31200_CAPID0_PDCD		BIT(4)
+#define IE31200_CAPID0_DDPCD		BIT(6)
+#define IE31200_CAPID0_ECC		BIT(1)
+
+#define IE31200_MAD_DIMM_0_OFFSET	0x5004
+#define IE31200_MAD_DIMM_SIZE		GENMASK_ULL(7, 0)
+#define IE31200_MAD_DIMM_A_RANK		BIT(17)
+#define IE31200_MAD_DIMM_A_WIDTH	BIT(19)
+
+#define IE31200_PAGES(n)		(n << (28 - PAGE_SHIFT))
+
+static int nr_channels;
+
+struct ie31200_priv {
+	void __iomem *window;
+};
+
+enum ie31200_chips {
+	IE31200 = 0,
+};
+
+struct ie31200_dev_info {
+	const char *ctl_name;
+};
+
+struct ie31200_error_info {
+	u16 errsts;
+	u16 errsts2;
+	u64 eccerrlog[IE31200_CHANNELS];
+};
+
+static const struct ie31200_dev_info ie31200_devs[] = {
+	[IE31200] = {
+		.ctl_name = "IE31200"
+	},
+};
+
+struct dimm_data {
+	u8 size; /* in 256MB multiples */
+	u8 dual_rank : 1,
+	   x16_width : 1; /* 0 means x8 width */
+};
+
+static int how_many_channels(struct pci_dev *pdev)
+{
+	int n_channels;
+	unsigned char capid0_2b; /* 2nd byte of CAPID0 */
+
+	pci_read_config_byte(pdev, IE31200_CAPID0 + 1, &capid0_2b);
+
+	/* check PDCD: Dual Channel Disable */
+	if (capid0_2b & IE31200_CAPID0_PDCD) {
+		edac_dbg(0, "In single channel mode\n");
+		n_channels = 1;
+	} else {
+		edac_dbg(0, "In dual channel mode\n");
+		n_channels = 2;
+	}
+
+	/* check DDPCD - check if both channels are filled */
+	if (capid0_2b & IE31200_CAPID0_DDPCD)
+		edac_dbg(0, "2 DIMMS per channel disabled\n");
+	else
+		edac_dbg(0, "2 DIMMS per channel enabled\n");
+
+	return n_channels;
+}
+
+static bool ecc_capable(struct pci_dev *pdev)
+{
+	unsigned char capid0_4b; /* 4th byte of CAPID0 */
+
+	pci_read_config_byte(pdev, IE31200_CAPID0 + 3, &capid0_4b);
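+	/* a set ECC bit in CAPID0 indicates ECC is disabled on this part */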
+	if (capid0_4b & IE31200_CAPID0_ECC)
+		return false;
+	return true;
+}
+
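+/*
+ * The rank within the channel plus a per-channel offset gives the virtual
+ * csrow: e.g. channel 1, rank 2 maps to 2 | (1 * 4) = csrow 6.
+ */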
+static int eccerrlog_row(int channel, u64 log)
+{
+	int rank = ((log & IE31200_ECCERRLOG_RANK_BITS) >>
+		IE31200_ECCERRLOG_RANK_SHIFT);
+	return rank | (channel * IE31200_RANKS_PER_CHANNEL);
+}
+
+static void ie31200_clear_error_info(struct mem_ctl_info *mci)
+{
+	/*
+	 * Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+			 IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
+}
+
+static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
+					     struct ie31200_error_info *info)
+{
+	struct pci_dev *pdev;
+	struct ie31200_priv *priv = mci->pvt_info;
+	void __iomem *window = priv->window;
+
+	pdev = to_pci_dev(mci->pdev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once, and a CE log can be overwritten by a UE
+	 * between reads.
+	 */
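+	/*
+	 * Example of the race: ERRSTS is read with only CE set, a UE then
+	 * overwrites the ECC log, and the second ERRSTS read below sees UE
+	 * as well -- the XOR check catches the transition and re-reads the
+	 * logs.
+	 */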
+	pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts);
+	if (!(info->errsts & IE31200_ERRSTS_BITS))
+		return;
+
+	info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+	if (nr_channels == 2)
+		info->eccerrlog[1] = lo_hi_readq(window + IE31200_C1ECCERRLOG);
+
+	pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error is the same for both reads then the first set
+	 * of reads is valid.  If there is a change then there is a CE
+	 * with no info and the second set of reads is valid and
+	 * should be UE info.
+	 */
+	if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+		info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+		if (nr_channels == 2)
+			info->eccerrlog[1] =
+				lo_hi_readq(window + IE31200_C1ECCERRLOG);
+	}
+
+	ie31200_clear_error_info(mci);
+}
+
+static void ie31200_process_error_info(struct mem_ctl_info *mci,
+				       struct ie31200_error_info *info)
+{
+	int channel;
+	u64 log;
+
+	if (!(info->errsts & IE31200_ERRSTS_BITS))
+		return;
+
+	if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+				     -1, -1, -1, "UE overwrote CE", "");
+		info->errsts = info->errsts2;
+	}
+
+	for (channel = 0; channel < nr_channels; channel++) {
+		log = info->eccerrlog[channel];
+		if (log & IE31200_ECCERRLOG_UE) {
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+					     0, 0, 0,
+					     eccerrlog_row(channel, log),
+					     channel, -1,
+					     "ie31200 UE", "");
+		} else if (log & IE31200_ECCERRLOG_CE) {
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+					     0, 0,
+					     IE31200_ECCERRLOG_SYNDROME(log),
+					     eccerrlog_row(channel, log),
+					     channel, -1,
+					     "ie31200 CE", "");
+		}
+	}
+}
+
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+	struct ie31200_error_info info;
+
+	edac_dbg(1, "MC%d\n", mci->mc_idx);
+	ie31200_get_and_clear_error_info(mci, &info);
+	ie31200_process_error_info(mci, &info);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+{
+	union {
+		u64 mchbar;
+		struct {
+			u32 mchbar_low;
+			u32 mchbar_high;
+		};
+	} u;
+	void __iomem *window;
+
+	pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
+	pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
+	u.mchbar &= IE31200_MCHBAR_MASK;
+
+	if (u.mchbar != (resource_size_t)u.mchbar) {
+		ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
+			       (unsigned long long)u.mchbar);
+		return NULL;
+	}
+
+	window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+	if (!window)
+		ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
+			       (unsigned long long)u.mchbar);
+
+	return window;
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int i, j, ret;
+	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
+	struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
+	void __iomem *window;
+	struct ie31200_priv *priv;
+	u32 addr_decode;
+
+	edac_dbg(0, "MC:\n");
+
+	if (!ecc_capable(pdev)) {
+		ie31200_printk(KERN_INFO, "No ECC support\n");
+		return -ENODEV;
+	}
+
+	nr_channels = how_many_channels(pdev);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = IE31200_DIMMS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = nr_channels;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+			    sizeof(struct ie31200_priv));
+	if (!mci)
+		return -ENOMEM;
+
+	window = ie31200_map_mchbar(pdev);
+	if (!window) {
+		ret = -ENODEV;
+		goto fail_free;
+	}
+
+	edac_dbg(3, "MC: init mci\n");
+	mci->pdev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR3;
+	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = IE31200_REVISION;
+	mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = ie31200_check;
+	mci->ctl_page_to_phys = NULL;
+	priv = mci->pvt_info;
+	priv->window = window;
+
+	/* populate DIMM info */
+	for (i = 0; i < IE31200_CHANNELS; i++) {
+		addr_decode = readl(window + IE31200_MAD_DIMM_0_OFFSET +
+					(i * 4));
+		edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+		for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+			dimm_info[i][j].size = (addr_decode >> (j * 8)) &
+						IE31200_MAD_DIMM_SIZE;
+			dimm_info[i][j].dual_rank = (addr_decode &
+				(IE31200_MAD_DIMM_A_RANK << j)) ? 1 : 0;
+			dimm_info[i][j].x16_width = (addr_decode &
+				(IE31200_MAD_DIMM_A_WIDTH << j)) ? 1 : 0;
+			edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
+				 dimm_info[i][j].size,
+				 dimm_info[i][j].dual_rank,
+				 dimm_info[i][j].x16_width);
+		}
+	}
+
+	/*
+	 * The DRAM rank boundary (DRB) reg values are boundary addresses
+	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
+	 * cumulative; the last one will contain the total memory
+	 * contained in all ranks.
+	 */
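+	/*
+	 * Each DIMM slot maps to a pair of virtual csrows, one per rank:
+	 * slot i uses csrows i * 2 and i * 2 + 1; a single-rank DIMM
+	 * populates only the even csrow.
+	 */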
+	for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
+		for (j = 0; j < IE31200_CHANNELS; j++) {
+			struct dimm_info *dimm;
+			unsigned long nr_pages;
+
+			nr_pages = IE31200_PAGES(dimm_info[j][i].size);
+			if (nr_pages == 0)
+				continue;
+
+			if (dimm_info[j][i].dual_rank) {
+				nr_pages = nr_pages / 2;
+				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+						     mci->n_layers, (i * 2) + 1,
+						     j, 0);
+				dimm->nr_pages = nr_pages;
+				edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+				dimm->grain = 8; /* just a guess */
+				dimm->mtype = MEM_DDR3;
+				dimm->dtype = DEV_UNKNOWN;
+				dimm->edac_mode = EDAC_UNKNOWN;
+			}
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+					     mci->n_layers, i * 2, j, 0);
+			dimm->nr_pages = nr_pages;
+			edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+			dimm->grain = 8; /* same guess */
+			dimm->mtype = MEM_DDR3;
+			dimm->dtype = DEV_UNKNOWN;
+			dimm->edac_mode = EDAC_UNKNOWN;
+		}
+	}
+
+	ie31200_clear_error_info(mci);
+
+	if (edac_mc_add_mc(mci)) {
+		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
+		ret = -ENODEV;
+		goto fail_unmap;
+	}
+
+	/* get this far and it's successful */
+	edac_dbg(3, "MC: success\n");
+	return 0;
+
+fail_unmap:
+	iounmap(window);
+
+fail_free:
+	edac_mc_free(mci);
+
+	return ret;
+}
+
+static int ie31200_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	edac_dbg(0, "MC:\n");
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	return ie31200_probe1(pdev, ent->driver_data);
+}
+
+static void ie31200_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct ie31200_priv *priv;
+
+	edac_dbg(0, "\n");
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
+	priv = mci->pvt_info;
+	iounmap(priv->window);
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id ie31200_pci_tbl[] = {
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
+		0,
+	}            /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
+
+static struct pci_driver ie31200_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = ie31200_init_one,
+	.remove = ie31200_remove_one,
+	.id_table = ie31200_pci_tbl,
+};
+
+static int __init ie31200_init(void)
+{
+	edac_dbg(3, "MC:\n");
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	return pci_register_driver(&ie31200_driver);
+}
+
+static void __exit ie31200_exit(void)
+{
+	edac_dbg(3, "MC:\n");
+	pci_unregister_driver(&ie31200_driver);
+}
+
+module_init(ie31200_init);
+module_exit(ie31200_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_DESCRIPTION("MC support for Intel Processor E31200 memory hub controllers");
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 5f43620..f78c1c5 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -78,7 +78,8 @@
 	"uop queue",
 	"insn buffer",
 	"predecode buffer",
-	"fetch address FIFO"
+	"fetch address FIFO",
+	"dispatch uop queue"
 };
 
 static const char * const f15h_mc2_mce_desc[] = {
@@ -267,6 +268,12 @@
 			pr_cont("System Read Data Error.\n");
 		else
 			pr_cont(" Internal error condition type %d.\n", xec);
+	} else if (INT_ERROR(ec)) {
+		if (xec <= 0x1f)
+			pr_cont("Hardware Assert.\n");
+		else
+			ret = false;
+
 	} else
 		ret = false;
 
@@ -373,7 +380,7 @@
 		pr_cont("%s.\n", f15h_mc1_mce_desc[xec-4]);
 		break;
 
-	case 0x11 ... 0x14:
+	case 0x11 ... 0x15:
 		pr_cont("Decoder %s parity error.\n", f15h_mc1_mce_desc[xec-4]);
 		break;
 
@@ -397,10 +404,20 @@
 		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
 
 		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
+	} else if (INT_ERROR(ec)) {
+		if (xec <= 0x3f)
+			pr_cont("Hardware Assert.\n");
+		else
+			goto wrong_mc1_mce;
 	} else if (fam_ops->mc1_mce(ec, xec))
 		;
 	else
-		pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
+		goto wrong_mc1_mce;
+
+	return;
+
+wrong_mc1_mce:
+	pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
 }
 
 static bool k8_mc2_mce(u16 ec, u8 xec)
@@ -468,6 +485,11 @@
 		default:
 			ret = false;
 		}
+	} else if (INT_ERROR(ec)) {
+		if (xec <= 0x3f)
+			pr_cont("Hardware Assert.\n");
+		else
+			ret = false;
 	}
 
 	return ret;
@@ -615,6 +637,7 @@
 static void decode_mc5_mce(struct mce *m)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u16 ec = EC(m->status);
 	u8 xec = XEC(m->status, xec_mask);
 
 	if (c->x86 == 0xf || c->x86 == 0x11)
@@ -622,6 +645,14 @@
 
 	pr_emerg(HW_ERR "MC5 Error: ");
 
+	if (INT_ERROR(ec)) {
+		if (xec <= 0x1f) {
+			pr_cont("Hardware Assert.\n");
+			return;
+		} else
+			goto wrong_mc5_mce;
+	}
+
 	if (xec == 0x0 || xec == 0xc)
 		pr_cont("%s.\n", mc5_mce_desc[xec]);
 	else if (xec <= 0xd)
@@ -642,6 +673,10 @@
 	pr_emerg(HW_ERR "MC6 Error: ");
 
 	switch (xec) {
+	case 0x0:
+		pr_cont("Hardware Assertion");
+		break;
+
 	case 0x1:
 		pr_cont("Free List");
 		break;
@@ -857,7 +892,8 @@
 		break;
 
 	case 0x15:
-		xec_mask = 0x1f;
+		xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
+
 		fam_ops->mc0_mce = f15h_mc0_mce;
 		fam_ops->mc1_mce = f15h_mc1_mce;
 		fam_ops->mc2_mce = f15h_mc2_mce;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 4891b45..e644b52 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -14,6 +14,8 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
 #include "edac_core.h"
 
 #define X38_REVISION		"1.1"
@@ -161,11 +163,6 @@
 			 X38_ERRSTS_BITS);
 }
 
-static u64 x38_readq(const void __iomem *addr)
-{
-	return readl(addr) | (((u64)readl(addr + 4)) << 32);
-}
-
 static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
 				 struct x38_error_info *info)
 {
@@ -183,9 +180,9 @@
 	if (!(info->errsts & X38_ERRSTS_BITS))
 		return;
 
-	info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+	info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
 	if (x38_channel_num == 2)
-		info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
+		info->eccerrlog[1] = lo_hi_readq(window + X38_C1ECCERRLOG);
 
 	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
 
@@ -196,10 +193,10 @@
 	 * should be UE info.
 	 */
 	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
-		info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+		info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
 		if (x38_channel_num == 2)
 			info->eccerrlog[1] =
-				x38_readq(window + X38_C1ECCERRLOG);
+				lo_hi_readq(window + X38_C1ECCERRLOG);
 	}
 
 	x38_clear_error_info(mci);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7c7f4b8..66aa83b 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -42,6 +42,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/clk/clk-conf.h>
 #include <linux/completion.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
@@ -274,6 +275,10 @@
 					client->flags & I2C_CLIENT_WAKE);
 	dev_dbg(dev, "probe\n");
 
+	status = of_clk_set_defaults(dev->of_node, false);
+	if (status < 0)
+		return status;
+
 	acpi_dev_pm_attach(&client->dev, true);
 	status = driver->probe(client, i2c_match_id(driver->id_table, client));
 	if (status)
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index a7e68c8..a077cc8 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -68,13 +68,13 @@
 /* Defaults values */
 #define BMA180_DEF_PMODE	0
 #define BMA180_DEF_BW		20
-#define BMA180_DEF_SCALE	250
+#define BMA180_DEF_SCALE	2452
 
 /* Available values for sysfs */
 #define BMA180_FLP_FREQ_AVAILABLE \
 	"10 20 40 75 150 300"
 #define BMA180_SCALE_AVAILABLE \
-	"0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
+	"0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
 
 struct bma180_data {
 	struct i2c_client *client;
@@ -94,7 +94,7 @@
 };
 
 static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
+static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
 
 static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
 {
@@ -376,6 +376,8 @@
 		mutex_unlock(&data->mutex);
 		return ret;
 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		if (val2)
+			return -EINVAL;
 		mutex_lock(&data->mutex);
 		ret = bma180_set_bw(data, val);
 		mutex_unlock(&data->mutex);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 36b1ae9..9f1a140 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -966,7 +966,7 @@
 
 	/* Now we have the two masks, work from least sig and build up sizes */
 	for_each_set_bit(out_ind,
-			 indio_dev->active_scan_mask,
+			 buffer->scan_mask,
 			 indio_dev->masklength) {
 		in_ind = find_next_bit(indio_dev->active_scan_mask,
 				       indio_dev->masklength,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bbb746e..7f0c2a3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -10,6 +10,11 @@
 config GIC_NON_BANKED
 	bool
 
+config ARM_GIC_V3
+	bool
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+
 config ARM_NVIC
 	bool
 	select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 62a13e5..c57e642 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -15,7 +15,8 @@
 obj-$(CONFIG_ARCH_SUNXI)		+= irq-sun4i.o
 obj-$(CONFIG_ARCH_SUNXI)		+= irq-sunxi-nmi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
-obj-$(CONFIG_ARM_GIC)			+= irq-gic.o
+obj-$(CONFIG_ARM_GIC)			+= irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
 obj-$(CONFIG_IMGPDC_IRQ)		+= irq-imgpdc.o
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644
index 0000000..60ac704
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include "irq-gic-common.h"
+
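+/*
+ * Example: for irq 35, enablemask is bit 3 of the second enable word
+ * (enableoff = 4), and its 2-bit config field sits at bits 7:6 of the
+ * third GIC_DIST_CONFIG word (confmask = 0x2 << 6, confoff = 8).
+ */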
+void gic_configure_irq(unsigned int irq, unsigned int type,
+		       void __iomem *base, void (*sync_access)(void))
+{
+	u32 enablemask = 1 << (irq % 32);
+	u32 enableoff = (irq / 32) * 4;
+	u32 confmask = 0x2 << ((irq % 16) * 2);
+	u32 confoff = (irq / 16) * 4;
+	bool enabled = false;
+	u32 val;
+
+	/*
+	 * Read current configuration register, and insert the config
+	 * for "irq", depending on "type".
+	 */
+	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+	if (type == IRQ_TYPE_LEVEL_HIGH)
+		val &= ~confmask;
+	else if (type == IRQ_TYPE_EDGE_RISING)
+		val |= confmask;
+
+	/*
+	 * As recommended by the spec, disable the interrupt before changing
+	 * the configuration
+	 */
+	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+		if (sync_access)
+			sync_access();
+		enabled = true;
+	}
+
+	/*
+	 * Write back the new configuration, and possibly re-enable
+	 * the interrupt.
+	 */
+	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+	if (enabled)
+		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+	if (sync_access)
+		sync_access();
+}
+
+void __init gic_dist_config(void __iomem *base, int gic_irqs,
+			    void (*sync_access)(void))
+{
+	unsigned int i;
+
+	/*
+	 * Set all global interrupts to be level triggered, active low.
+	 */
+	for (i = 32; i < gic_irqs; i += 16)
+		writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
+
+	/*
+	 * Set priority on all global interrupts.
+	 */
+	for (i = 32; i < gic_irqs; i += 4)
+		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
+
+	/*
+	 * Disable all interrupts.  Leave the PPI and SGIs alone
+	 * as they are enabled by redistributor registers.
+	 */
+	for (i = 32; i < gic_irqs; i += 32)
+		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
+
+	if (sync_access)
+		sync_access();
+}
+
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
+{
+	int i;
+
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
+	writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
+
+	if (sync_access)
+		sync_access();
+}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644
index 0000000..b41f024
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _IRQ_GIC_COMMON_H
+#define _IRQ_GIC_COMMON_H
+
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+                       void __iomem *base, void (*sync_access)(void));
+void gic_dist_config(void __iomem *base, int gic_irqs,
+		     void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+
+#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644
index 0000000..57eaa5a
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include "irq-gic-common.h"
+#include "irqchip.h"
+
+struct gic_chip_data {
+	void __iomem		*dist_base;
+	void __iomem		**redist_base;
+	void __percpu __iomem	**rdist;
+	struct irq_domain	*domain;
+	u64			redist_stride;
+	u32			redist_regions;
+	unsigned int		irq_nr;
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+
+#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdist))
+#define gic_data_rdist_rd_base()	(*gic_data_rdist())
+#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE	0xf0
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
+static inline int gic_irq_in_rdist(struct irq_data *d)
+{
+	return gic_irq(d) < 32;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
+		return gic_data_rdist_sgi_base();
+
+	if (d->hwirq <= 1023)		/* SPI -> dist_base */
+		return gic_data.dist_base;
+
+	if (d->hwirq >= 8192)
+		BUG();		/* LPI Detected!!! */
+
+	return NULL;
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+	u32 count = 1000000;	/* 1s! */
+
+	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("RWP timeout, gone fishing\n");
+			return;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+/* Low level accessors */
+static u64 gic_read_iar(void)
+{
+	u64 irqstat;
+
+	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+	return irqstat;
+}
+
+static void gic_write_pmr(u64 val)
+{
+	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_write_ctlr(u64 val)
+{
+	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
+	isb();
+}
+
+static void gic_write_grpen1(u64 val)
+{
+	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
+	isb();
+}
+
+static void gic_write_sgi1r(u64 val)
+{
+	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_enable_sre(void)
+{
+	u64 val;
+
+	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+	val |= ICC_SRE_EL1_SRE;
+	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
+	isb();
+
+	/*
+	 * Need to check that the SRE bit has actually been set. If
+	 * not, it means that SRE is disabled at EL2. We're going to
+	 * die painfully, and there is nothing we can do about it.
+	 *
+	 * Kindly inform the luser.
+	 */
+	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+	if (!(val & ICC_SRE_EL1_SRE))
+		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_enable_redist(void)
+{
+	void __iomem *rbase;
+	u32 count = 1000000;	/* 1s! */
+	u32 val;
+
+	rbase = gic_data_rdist_rd_base();
+
+	/* Wake up this CPU redistributor */
+	val = readl_relaxed(rbase + GICR_WAKER);
+	val &= ~GICR_WAKER_ProcessorSleep;
+	writel_relaxed(val, rbase + GICR_WAKER);
+
+	while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("redist didn't wake up...\n");
+			return;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+	void (*rwp_wait)(void);
+	void __iomem *base;
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+		rwp_wait = gic_redist_wait_for_rwp;
+	} else {
+		base = gic_data.dist_base;
+		rwp_wait = gic_dist_wait_for_rwp;
+	}
+
+	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+	rwp_wait();
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+	void __iomem *base;
+
+	if (gic_irq_in_rdist(d))
+		base = gic_data_rdist_sgi_base();
+	else
+		base = gic_data.dist_base;
+
+	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ICENABLER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+	gic_write_eoir(gic_irq(d));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	unsigned int irq = gic_irq(d);
+	void (*rwp_wait)(void);
+	void __iomem *base;
+
+	/* Interrupt configuration for SGIs can't be changed */
+	if (irq < 16)
+		return -EINVAL;
+
+	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+		rwp_wait = gic_redist_wait_for_rwp;
+	} else {
+		base = gic_data.dist_base;
+		rwp_wait = gic_dist_wait_for_rwp;
+	}
+
+	gic_configure_irq(irq, type, base, rwp_wait);
+
+	return 0;
+}
+
+static u64 gic_mpidr_to_affinity(u64 mpidr)
+{
+	u64 aff;
+
+	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	return aff;
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	u64 irqnr;
+
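+	/*
+	 * INTIDs 0-15 are SGIs, 16-31 PPIs, 32-1019 SPIs; 1020-1023 are
+	 * special, with 1023 reporting a spurious interrupt and ending
+	 * the loop below.
+	 */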
+	do {
+		irqnr = gic_read_iar();
+
+		if (likely(irqnr > 15 && irqnr < 1020)) {
+			u64 irq = irq_find_mapping(gic_data.domain, irqnr);
+			if (likely(irq)) {
+				handle_IRQ(irq, regs);
+				continue;
+			}
+
+			WARN_ONCE(true, "Unexpected SPI received!\n");
+			gic_write_eoir(irqnr);
+		}
+		if (irqnr < 16) {
+			gic_write_eoir(irqnr);
+#ifdef CONFIG_SMP
+			handle_IPI(irqnr, regs);
+#else
+			WARN_ONCE(true, "Unexpected SGI received!\n");
+#endif
+			continue;
+		}
+	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+}
+
+static void __init gic_dist_init(void)
+{
+	unsigned int i;
+	u64 affinity;
+	void __iomem *base = gic_data.dist_base;
+
+	/* Disable the distributor */
+	writel_relaxed(0, base + GICD_CTLR);
+	gic_dist_wait_for_rwp();
+
+	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+
+	/* Enable distributor with ARE, Group1 */
+	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
+		       base + GICD_CTLR);
+
+	/*
+	 * Set all global interrupts to the boot CPU only. ARE must be
+	 * enabled.
+	 */
+	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+	for (i = 32; i < gic_data.irq_nr; i++)
+		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
+}
+
+static int gic_populate_rdist(void)
+{
+	u64 mpidr = cpu_logical_map(smp_processor_id());
+	u64 typer;
+	u32 aff;
+	int i;
+
+	/*
+	 * Convert affinity to a 32bit value that can be matched to
+	 * GICR_TYPER bits [63:32].
+	 */
+	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	for (i = 0; i < gic_data.redist_regions; i++) {
+		void __iomem *ptr = gic_data.redist_base[i];
+		u32 reg;
+
+		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+		if (reg != GIC_PIDR2_ARCH_GICv3 &&
+		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
+			pr_warn("No redistributor present @%p\n", ptr);
+			break;
+		}
+
+		do {
+			typer = readq_relaxed(ptr + GICR_TYPER);
+			if ((typer >> 32) == aff) {
+				gic_data_rdist_rd_base() = ptr;
+				pr_info("CPU%d: found redistributor %llx @%p\n",
+					smp_processor_id(),
+					(unsigned long long)mpidr, ptr);
+				return 0;
+			}
+
+			if (gic_data.redist_stride) {
+				ptr += gic_data.redist_stride;
+			} else {
+				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+				if (typer & GICR_TYPER_VLPIS)
+					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+			}
+		} while (!(typer & GICR_TYPER_LAST));
+	}
+
+	/* We couldn't even deal with ourselves... */
+	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
+	     smp_processor_id(), (unsigned long long)mpidr);
+	return -ENODEV;
+}
+
+static void gic_cpu_init(void)
+{
+	void __iomem *rbase;
+
+	/* Register ourselves with the rest of the world */
+	if (gic_populate_rdist())
+		return;
+
+	gic_enable_redist();
+
+	rbase = gic_data_rdist_sgi_base();
+
+	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+
+	/* Enable system registers */
+	gic_enable_sre();
+
+	/* Set priority mask register */
+	gic_write_pmr(DEFAULT_PMR_VALUE);
+
+	/* EOI deactivates interrupt too (mode 0) */
+	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+
+	/* ... and let's hit the road... */
+	gic_write_grpen1(1);
+}
+
+#ifdef CONFIG_SMP
+static int gic_secondary_init(struct notifier_block *nfb,
+			      unsigned long action, void *hcpu)
+{
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+		gic_cpu_init();
+	return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block gic_cpu_notifier = {
+	.notifier_call = gic_secondary_init,
+	.priority = 100,
+};
+
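+/*
+ * Example: for a cluster whose online CPUs have Aff0 values 0..3, this
+ * returns tlist = 0xf and leaves *base_cpu on the cluster's last CPU so
+ * the caller's loop continues with the next cluster.
+ */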
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+				   u64 cluster_id)
+{
+	int cpu = *base_cpu;
+	u64 mpidr = cpu_logical_map(cpu);
+	u16 tlist = 0;
+
+	while (cpu < nr_cpu_ids) {
+		/*
+		 * If we ever get a cluster of more than 16 CPUs, just
+		 * scream and skip that CPU.
+		 */
+		if (WARN_ON((mpidr & 0xff) >= 16))
+			goto out;
+
+		tlist |= 1 << (mpidr & 0xf);
+
+		cpu = cpumask_next(cpu, mask);
+		if (cpu == nr_cpu_ids)
+			goto out;
+
+		mpidr = cpu_logical_map(cpu);
+
+		if (cluster_id != (mpidr & ~0xffUL)) {
+			cpu--;
+			goto out;
+		}
+	}
+out:
+	*base_cpu = cpu;
+	return tlist;
+}
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+	u64 val;
+
+	val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48	|
+	       MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32	|
+	       irq << 24			    		|
+	       MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16	|
+	       tlist);
+
+	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	gic_write_sgi1r(val);
+}
+
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+	int cpu;
+
+	if (WARN_ON(irq >= 16))
+		return;
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	smp_wmb();
+
+	for_each_cpu_mask(cpu, *mask) {
+		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+		u16 tlist;
+
+		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+		gic_send_sgi(cluster_id, tlist, irq);
+	}
+
+	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
+	isb();
+}
+
+static void gic_smp_init(void)
+{
+	set_smp_cross_call(gic_raise_softirq);
+	register_cpu_notifier(&gic_cpu_notifier);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	void __iomem *reg;
+	int enabled;
+	u64 val;
+
+	if (gic_irq_in_rdist(d))
+		return -EINVAL;
+
+	/* If interrupt was enabled, disable it first */
+	enabled = gic_peek_irq(d, GICD_ISENABLER);
+	if (enabled)
+		gic_mask_irq(d);
+
+	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
+	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+	writeq_relaxed(val, reg);
+
+	/*
+	 * If the interrupt was enabled, enable it again. Otherwise,
+	 * just wait for the distributor to have digested our changes.
+	 */
+	if (enabled)
+		gic_unmask_irq(d);
+	else
+		gic_dist_wait_for_rwp();
+
+	return IRQ_SET_MASK_OK;
+}
+#else
+#define gic_set_affinity	NULL
+#define gic_smp_init()		do { } while (0)
+#endif
+
+static struct irq_chip gic_chip = {
+	.name			= "GICv3",
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_set_affinity	= gic_set_affinity,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			      irq_hw_number_t hw)
+{
+	/* SGIs are private to the core kernel */
+	if (hw < 16)
+		return -EPERM;
+	/* PPIs */
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_percpu_devid_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+	}
+	/* SPIs */
+	if (hw >= 32 && hw < gic_data.irq_nr) {
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_fasteoi_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *controller,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (d->of_node != controller)
+		return -EINVAL;
+	if (intsize < 3)
+		return -EINVAL;
+
+	switch (intspec[0]) {
+	case 0:			/* SPI */
+		*out_hwirq = intspec[1] + 32;
+		break;
+	case 1:			/* PPI */
+		*out_hwirq = intspec[1] + 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+	.map = gic_irq_domain_map,
+	.xlate = gic_irq_domain_xlate,
+};
+
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+	void __iomem *dist_base;
+	void __iomem **redist_base;
+	u64 redist_stride;
+	u32 redist_regions;
+	u32 reg;
+	int gic_irqs;
+	int err;
+	int i;
+
+	dist_base = of_iomap(node, 0);
+	if (!dist_base) {
+		pr_err("%s: unable to map gic dist registers\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
+		pr_err("%s: no distributor detected, giving up\n",
+			node->full_name);
+		err = -ENODEV;
+		goto out_unmap_dist;
+	}
+
+	if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
+		redist_regions = 1;
+
+	redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
+	if (!redist_base) {
+		err = -ENOMEM;
+		goto out_unmap_dist;
+	}
+
+	for (i = 0; i < redist_regions; i++) {
+		redist_base[i] = of_iomap(node, 1 + i);
+		if (!redist_base[i]) {
+			pr_err("%s: couldn't map region %d\n",
+			       node->full_name, i);
+			err = -ENODEV;
+			goto out_unmap_rdist;
+		}
+	}
+
+	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+		redist_stride = 0;
+
+	gic_data.dist_base = dist_base;
+	gic_data.redist_base = redist_base;
+	gic_data.redist_regions = redist_regions;
+	gic_data.redist_stride = redist_stride;
+
+	/*
+	 * Find out how many interrupts are supported.
+	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+	 */
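+	/* e.g. GICD_TYPER.ITLinesNumber = 2 gives (2 + 1) * 32 = 96 IDs */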
+	gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
+	gic_irqs = (gic_irqs + 1) * 32;
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
+	gic_data.irq_nr = gic_irqs;
+
+	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
+					      &gic_data);
+	gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+
+	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	set_handle_irq(gic_handle_irq);
+
+	gic_smp_init();
+	gic_dist_init();
+	gic_cpu_init();
+
+	return 0;
+
+out_free:
+	if (gic_data.domain)
+		irq_domain_remove(gic_data.domain);
+	free_percpu(gic_data.rdist);
+out_unmap_rdist:
+	for (i = 0; i < redist_regions; i++)
+		if (redist_base[i])
+			iounmap(redist_base[i]);
+	kfree(redist_base);
+out_unmap_dist:
+	iounmap(dist_base);
+	return err;
+}
+
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7c131cf..9c1f883 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -47,6 +47,7 @@
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
 
+#include "irq-gic-common.h"
 #include "irqchip.h"
 
 union gic_base {
@@ -189,12 +190,6 @@
 {
 	void __iomem *base = gic_dist_base(d);
 	unsigned int gicirq = gic_irq(d);
-	u32 enablemask = 1 << (gicirq % 32);
-	u32 enableoff = (gicirq / 32) * 4;
-	u32 confmask = 0x2 << ((gicirq % 16) * 2);
-	u32 confoff = (gicirq / 16) * 4;
-	bool enabled = false;
-	u32 val;
 
 	/* Interrupt configuration for SGIs can't be changed */
 	if (gicirq < 16)
@@ -208,25 +203,7 @@
 	if (gic_arch_extn.irq_set_type)
 		gic_arch_extn.irq_set_type(d, type);
 
-	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
-	if (type == IRQ_TYPE_LEVEL_HIGH)
-		val &= ~confmask;
-	else if (type == IRQ_TYPE_EDGE_RISING)
-		val |= confmask;
-
-	/*
-	 * As recommended by the spec, disable the interrupt before changing
-	 * the configuration
-	 */
-	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
-		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
-		enabled = true;
-	}
-
-	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-
-	if (enabled)
-		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+	gic_configure_irq(gicirq, type, base, NULL);
 
 	raw_spin_unlock(&irq_controller_lock);
 
@@ -388,12 +365,6 @@
 	writel_relaxed(0, base + GIC_DIST_CTRL);
 
 	/*
-	 * Set all global interrupts to be level triggered, active low.
-	 */
-	for (i = 32; i < gic_irqs; i += 16)
-		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
-
-	/*
 	 * Set all global interrupts to this CPU only.
 	 */
 	cpumask = gic_get_cpumask(gic);
@@ -402,18 +373,7 @@
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
-	/*
-	 * Set priority on all global interrupts.
-	 */
-	for (i = 32; i < gic_irqs; i += 4)
-		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
-
-	/*
-	 * Disable all interrupts.  Leave the PPI and SGIs alone
-	 * as these enables are banked registers.
-	 */
-	for (i = 32; i < gic_irqs; i += 32)
-		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+	gic_dist_config(base, gic_irqs, NULL);
 
 	writel_relaxed(1, base + GIC_DIST_CTRL);
 }
@@ -440,18 +400,7 @@
 		if (i != cpu)
 			gic_cpu_map[i] &= ~cpu_mask;
 
-	/*
-	 * Deal with the banked PPI and SGI interrupts - disable all
-	 * PPI interrupts, ensure all SGI interrupts are enabled.
-	 */
-	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
-	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
-
-	/*
-	 * Set priority on PPI and SGI interrupts
-	 */
-	for (i = 0; i < 32; i += 4)
-		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+	gic_cpu_config(dist_base, NULL);
 
 	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
 	writel_relaxed(1, base + GIC_CPU_CTRL);
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d..b7ae0a0 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@
 error:
 	freeurbs(cs);
 	usb_set_intfdata(interface, NULL);
+	usb_put_dev(udev);
 	gigaset_freecs(cs);
 	return rc;
 }
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 96c92b7..ab472c5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1528,7 +1528,7 @@
 	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
 	       (block_size & (block_size - 1)));
 
-	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c) {
 		r = -ENOMEM;
 		goto bad_client;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5f054c4..2c63326 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -231,7 +231,7 @@
 	/*
 	 * cache_size entries, dirty if set
 	 */
-	dm_cblock_t nr_dirty;
+	atomic_t nr_dirty;
 	unsigned long *dirty_bitset;
 
 	/*
@@ -492,7 +492,7 @@
 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
 {
 	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
-		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+		atomic_inc(&cache->nr_dirty);
 		policy_set_dirty(cache->policy, oblock);
 	}
 }
@@ -501,8 +501,7 @@
 {
 	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
 		policy_clear_dirty(cache->policy, oblock);
-		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
-		if (!from_cblock(cache->nr_dirty))
+		if (atomic_dec_return(&cache->nr_dirty) == 0)
 			dm_table_event(cache->ti->table);
 	}
 }
@@ -2269,7 +2268,7 @@
 	atomic_set(&cache->quiescing_ack, 0);
 
 	r = -ENOMEM;
-	cache->nr_dirty = 0;
+	atomic_set(&cache->nr_dirty, 0);
 	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
 	if (!cache->dirty_bitset) {
 		*error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@
 
 		residency = policy_residency(cache->policy);
 
-		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
 		       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
 		       (unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@
 		       (unsigned) atomic_read(&cache->stats.write_miss),
 		       (unsigned) atomic_read(&cache->stats.demotion),
 		       (unsigned) atomic_read(&cache->stats.promotion),
-		       (unsigned long long) from_cblock(cache->nr_dirty));
+		       (unsigned long) atomic_read(&cache->nr_dirty));
 
 		if (writethrough_mode(&cache->features))
 			DMEMIT("1 writethrough ");
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 824108c..12430be 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -287,7 +287,8 @@
 			break;
 		}
 
-		priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+		priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+						     resource_size(res));
 		if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
 			dev_info(&pdev->dev, "control memory is not used for raminit\n");
 		else
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index c83584a..5a1891f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -339,7 +339,8 @@
 	/* Calculate the number of Tx and Rx rings to be created */
 	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
 				     pdata->hw_feat.tx_ch_cnt);
-	if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+	if (ret) {
 		dev_err(dev, "error setting real tx queue count\n");
 		goto err_io;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4cab09d..8206a29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@
 	u8		flags;
 /* Set on the first BD descriptor when there is a split BD */
 #define BNX2X_TSO_SPLIT_BD		(1<<0)
+#define BNX2X_HAS_SECOND_PBD		(1<<1)
 };
 
 struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b875da..c43e7238 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@
 	--nbd;
 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
+	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+		/* Skip second parse bd... */
+		--nbd;
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -3889,6 +3895,9 @@
 			/* set encapsulation flag in start BD */
 			SET_FLAG(tx_start_bd->general_data,
 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
 			nbd++;
 		} else if (xmit_type & XMIT_CSUM) {
 			/* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bd0600c..25eddd9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -379,6 +379,7 @@
 			break;
 		case PORT_FIBRE:
 		case PORT_DA:
+		case PORT_NONE:
 			if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
 			      bp->port.supported[1] & SUPPORTED_FIBRE)) {
 				DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 16281ad..4e615de 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1149,6 +1149,11 @@
 		goto out;
 	}
 
+	if (skb_padto(skb, ETH_ZLEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
 	/* set the SKB transmit checksum */
 	if (priv->desc_64b_en) {
 		ret = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index fd411d6..d813bfb 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@
 	return err;
 }
 
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+	struct vio_driver_state *vio = &vnet->vio;
+
+	return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
 	unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@
 	struct vnet_port *port;
 
 	hlist_for_each_entry(port, hp, hash) {
+		if (!port_is_up(port))
+			continue;
 		if (ether_addr_equal(port->raddr, skb->data))
 			return port;
 	}
-	port = NULL;
-	if (!list_empty(&vp->port_list))
-		port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-	return port;
+	list_for_each_entry(port, &vp->port_list, list) {
+		if (!port->switch_port)
+			continue;
+		if (!port_is_up(port))
+			continue;
+		return port;
+	}
+	return NULL;
 }
 
 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4ed38ea..d97d5f3 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@
 
 	net_device->send_section_map =
 		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
-	if (net_device->send_section_map == NULL)
+	if (net_device->send_section_map == NULL) {
+		ret = -ENOMEM;
 		goto cleanup;
+	}
 
 	goto exit;
 
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4eaadcf..203651e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -255,6 +255,7 @@
 
 	bus->dev.parent = bus->parent;
 	bus->dev.class = &mdio_bus_class;
+	bus->dev.driver = bus->parent->driver;
 	bus->dev.groups = NULL;
 	dev_set_name(&bus->dev, "%s", bus->id);
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 35d753d..22c57be 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@
 	phydev->bus->phy_map[phydev->addr] = phydev;
 
 	/* Run all of the fixups for this PHY */
-	err = phy_init_hw(phydev);
+	err = phy_scan_fixups(phydev);
 	if (err) {
 		pr_err("PHY %d failed to initialize\n", phydev->addr);
 		goto out;
@@ -575,6 +575,7 @@
 		      u32 flags, phy_interface_t interface)
 {
 	struct device *d = &phydev->dev;
+	struct module *bus_module;
 	int err;
 
 	/* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@
 		return -EBUSY;
 	}
 
+	/* Increment the bus module reference count */
+	bus_module = phydev->bus->dev.driver ?
+		     phydev->bus->dev.driver->owner : NULL;
+	if (!try_module_get(bus_module)) {
+		dev_err(&dev->dev, "failed to get the bus module\n");
+		return -EIO;
+	}
+
 	phydev->attached_dev = dev;
 	dev->phydev = phydev;
 
@@ -664,6 +673,10 @@
 void phy_detach(struct phy_device *phydev)
 {
 	int i;
+
+	if (phydev->bus->dev.driver)
+		module_put(phydev->bus->dev.driver->owner);
+
 	phydev->attached_dev->phydev = NULL;
 	phydev->attached_dev = NULL;
 	phy_suspend(phydev);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe..2a32d91 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@
 		usb_driver_release_interface(driver, info->data);
 		return -ENODEV;
 	}
+
+	/* Some devices don't initialise properly; in particular the
+	 * packet filter is not reset, and some devices don't perform
+	 * a full reset at all. Set the packet filter to a sane
+	 * initial value.
+	 */
+	usb_control_msg(dev->udev,
+			usb_sndctrlpipe(dev->udev, 0),
+			USB_CDC_SET_ETHERNET_PACKET_FILTER,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+			intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL,
+			0,
+			USB_CTRL_SET_TIMEOUT
+		);
 	return 0;
 
 bad_desc:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7bad2d3..3eab74c 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -282,7 +282,7 @@
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK		0x0006
 #define STAT_SPEED_HIGH		0x0000
-#define STAT_SPEED_FULL		0x0001
+#define STAT_SPEED_FULL		0x0002
 
 /* USB_TX_AGG */
 #define TX_AGG_MAX_THRESHOLD	0x03
@@ -2292,9 +2292,8 @@
 	/* rx share fifo credit full threshold */
 	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
 
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-	ocp_data &= STAT_SPEED_MASK;
-	if (ocp_data == STAT_SPEED_FULL) {
+	if (tp->udev->speed == USB_SPEED_FULL ||
+	    tp->udev->speed == USB_SPEED_LOW) {
 		/* rx share fifo credit near full threshold */
 		ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
 				RXFIFO_THR2_FULL);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ade33ef..9f79192 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -339,7 +339,7 @@
 	ndm->ndm_state = fdb->state;
 	ndm->ndm_ifindex = vxlan->dev->ifindex;
 	ndm->ndm_flags = fdb->flags;
-	ndm->ndm_type = NDA_DST;
+	ndm->ndm_type = RTN_UNICAST;
 
 	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
 		goto nla_put_failure;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2c..7c28cb5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -887,6 +887,15 @@
 
 		tx_info = IEEE80211_SKB_CB(skb);
 		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+		/*
+		 * No aggregation session is running, but there may be frames
+		 * from a previous session or a failed attempt in the queue.
+		 * Send them out as normal data frames
+		 */
+		if (!tid->active)
+			tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
 		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
 			bf->bf_state.bf_type = 0;
 			return bf;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 725ba49..8b79081 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -1072,8 +1072,12 @@
 	/* Fill the common data for all mac context types */
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
-	/* Also enable probe requests to pass */
-	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+	/*
+	 * pass probe requests and beacons from other APs (needed
+	 * for ht protection)
+	 */
+	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+					MAC_FILTER_IN_BEACON);
 
 	/* Fill the data specific for ap mode */
 	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1094,6 +1098,13 @@
 	/* Fill the common data for all mac context types */
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
+	/*
+	 * pass probe requests and beacons from other APs (needed
+	 * for ht protection)
+	 */
+	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+					MAC_FILTER_IN_BEACON);
+
 	/* Fill the data specific for GO mode */
 	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
 				     action == FW_CTXT_ACTION_ADD);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9bfb906..98556d0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -303,13 +303,6 @@
 		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 	}
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
-	    !iwlwifi_mod_params.uapsd_disable) {
-		hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
-		hw->uapsd_queues = IWL_UAPSD_AC_INFO;
-		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
-	}
-
 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
 	hw->chanctx_data_size = sizeof(u16);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b777d8f..9aa012e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,54 @@
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
 
+/**
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree so that the /memory node contains
+ * at most @limit entries. This function may be called at any time
+ * after initial_boot_params is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+	int memory;
+	int len;
+	const void *val;
+	int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+	int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+	const uint32_t *addr_prop;
+	const uint32_t *size_prop;
+	int root_offset;
+	int cell_size;
+
+	root_offset = fdt_path_offset(initial_boot_params, "/");
+	if (root_offset < 0)
+		return;
+
+	addr_prop = fdt_getprop(initial_boot_params, root_offset,
+				"#address-cells", NULL);
+	if (addr_prop)
+		nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+	size_prop = fdt_getprop(initial_boot_params, root_offset,
+				"#size-cells", NULL);
+	if (size_prop)
+		nr_size_cells = fdt32_to_cpu(*size_prop);
+
+	cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+	memory = fdt_path_offset(initial_boot_params, "/memory");
+	if (memory > 0) {
+		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+		if (len > limit*cell_size) {
+			len = limit*cell_size;
+			pr_debug("Limiting number of entries to %d\n", limit);
+			fdt_setprop(initial_boot_params, memory, "reg", val,
+					len);
+		}
+	}
+}
+
 /**
  * of_fdt_is_compatible - Return true if given node from the given blob has
  * compat in its compatible list
@@ -937,7 +985,7 @@
 }
 #endif
 
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
 {
 	if (!params)
 		return false;
@@ -951,6 +999,12 @@
 		return false;
 	}
 
+	return true;
+}
+
+
+void __init early_init_dt_scan_nodes(void)
+{
 	/* Retrieve various information from the /chosen node */
 	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
 
@@ -959,7 +1013,17 @@
 
 	/* Setup memory, calling early_init_dt_add_memory_arch */
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
 
+bool __init early_init_dt_scan(void *params)
+{
+	bool status;
+
+	status = early_init_dt_verify(params);
+	if (!status)
+		return false;
+
+	early_init_dt_scan_nodes();
 	return true;
 }
 
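The early_init_dt_scan() split above exists so that an architecture can validate the FDT, edit it in place, and only then run the generic scans. A minimal sketch of that consumption pattern, assuming the declarations are picked up from <linux/of_fdt.h> (the setup_machine_fdt() wrapper and the limit of 8 are illustrative, not part of this patch):

	#include <linux/of_fdt.h>

	static void __init setup_machine_fdt(void *dt_virt)
	{
		/* Check the FDT magic and version before trusting the blob */
		if (!early_init_dt_verify(dt_virt))
			return;

		/* The verified blob may now be edited, e.g. to cap /memory */
		of_fdt_limit_memory(8);			/* example limit */

		/* Parse /chosen, root #cells and /memory as before */
		early_init_dt_scan_nodes();
	}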
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 44fe6aa..3d2076f 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -385,4 +385,4 @@
 
 MODULE_DESCRIPTION("Generic PCI host driver");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index ce23e0f..a8c6f1a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1094,4 +1094,4 @@
 
 MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell EBU PCIe driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 083cf37..c284e84 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -1716,4 +1716,4 @@
 
 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index f7d3de3..4884ee5 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -105,7 +105,7 @@
 #define  PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
 #define  PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)
 
-#define PCI_MAX_RESOURCES 4
+#define RCAR_PCI_MAX_RESOURCES 4
 #define MAX_NR_INBOUND_MAPS 6
 
 struct rcar_msi {
@@ -127,7 +127,7 @@
 struct rcar_pcie {
 	struct device		*dev;
 	void __iomem		*base;
-	struct resource		res[PCI_MAX_RESOURCES];
+	struct resource		res[RCAR_PCI_MAX_RESOURCES];
 	struct resource		busn;
 	int			root_bus_nr;
 	struct clk		*clk;
@@ -140,36 +140,37 @@
 	return sys->private_data;
 }
 
-static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
-			  unsigned long reg)
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
+			       unsigned long reg)
 {
 	writel(val, pcie->base + reg);
 }
 
-static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg)
+static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
+				       unsigned long reg)
 {
 	return readl(pcie->base + reg);
 }
 
 enum {
-	PCI_ACCESS_READ,
-	PCI_ACCESS_WRITE,
+	RCAR_PCI_ACCESS_READ,
+	RCAR_PCI_ACCESS_WRITE,
 };
 
 static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
 {
 	int shift = 8 * (where & 3);
-	u32 val = pci_read_reg(pcie, where & ~3);
+	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 
 	val &= ~(mask << shift);
 	val |= data << shift;
-	pci_write_reg(pcie, val, where & ~3);
+	rcar_pci_write_reg(pcie, val, where & ~3);
 }
 
 static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
 {
 	int shift = 8 * (where & 3);
-	u32 val = pci_read_reg(pcie, where & ~3);
+	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 
 	return val >> shift;
 }
@@ -205,14 +206,14 @@
 		if (dev != 0)
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
-		if (access_type == PCI_ACCESS_READ) {
-			*data = pci_read_reg(pcie, PCICONF(index));
+		if (access_type == RCAR_PCI_ACCESS_READ) {
+			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 		} else {
 			/* Keep an eye out for changes to the root bus number */
 			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
 				pcie->root_bus_nr = *data & 0xff;
 
-			pci_write_reg(pcie, *data, PCICONF(index));
+			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 		}
 
 		return PCIBIOS_SUCCESSFUL;
@@ -222,20 +223,20 @@
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	/* Clear errors */
-	pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 
 	/* Set the PIO address */
-	pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) |
-				PCIE_CONF_FUNC(func) | reg, PCIECAR);
+	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 
 	/* Enable the configuration access */
 	if (bus->parent->number == pcie->root_bus_nr)
-		pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 	else
-		pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 
 	/* Check for errors */
-	if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	/* Check for master and target aborts */
@@ -243,13 +244,13 @@
 		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
-	if (access_type == PCI_ACCESS_READ)
-		*data = pci_read_reg(pcie, PCIECDR);
+	if (access_type == RCAR_PCI_ACCESS_READ)
+		*data = rcar_pci_read_reg(pcie, PCIECDR);
 	else
-		pci_write_reg(pcie, *data, PCIECDR);
+		rcar_pci_write_reg(pcie, *data, PCIECDR);
 
 	/* Disable the configuration access */
-	pci_write_reg(pcie, 0, PCIECCTLR);
+	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 
 	return PCIBIOS_SUCCESSFUL;
 }
@@ -260,12 +261,7 @@
 	struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if ((size == 2) && (where & 1))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	else if ((size == 4) && (where & 3))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
 				      bus, devfn, where, val);
 	if (ret != PCIBIOS_SUCCESSFUL) {
 		*val = 0xffffffff;
@@ -291,12 +287,7 @@
 	int shift, ret;
 	u32 data;
 
-	if ((size == 2) && (where & 1))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	else if ((size == 4) && (where & 3))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
 				      bus, devfn, where, &data);
 	if (ret != PCIBIOS_SUCCESSFUL)
 		return ret;
@@ -315,7 +306,7 @@
 	} else
 		data = val;
 
-	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE,
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
 				      bus, devfn, where, &data);
 
 	return ret;
@@ -326,14 +317,15 @@
 	.write	= rcar_pcie_write_conf,
 };
 
-static void rcar_pcie_setup_window(int win, struct resource *res,
-				   struct rcar_pcie *pcie)
+static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
 {
+	struct resource *res = &pcie->res[win];
+
 	/* Setup PCIe address space mappings for each resource */
 	resource_size_t size;
 	u32 mask;
 
-	pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
 
 	/*
 	 * The PAMR mask is calculated in units of 128Bytes, which
@@ -341,17 +333,17 @@
 	 */
 	size = resource_size(res);
 	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
-	pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
 
-	pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
-	pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+	rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
+	rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
 
 	/* First resource is for IO */
 	mask = PAR_ENABLE;
 	if (res->flags & IORESOURCE_IO)
 		mask |= IO_SPACE;
 
-	pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
 }
 
 static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
@@ -363,13 +355,13 @@
 	pcie->root_bus_nr = -1;
 
 	/* Setup PCI resources */
-	for (i = 0; i < PCI_MAX_RESOURCES; i++) {
+	for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
 
 		res = &pcie->res[i];
 		if (!res->flags)
 			continue;
 
-		rcar_pcie_setup_window(i, res, pcie);
+		rcar_pcie_setup_window(i, pcie);
 
 		if (res->flags & IORESOURCE_IO)
 			pci_ioremap_io(nr * SZ_64K, res->start);
@@ -415,7 +407,7 @@
 	unsigned int timeout = 100;
 
 	while (timeout--) {
-		if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 			return 0;
 
 		udelay(100);
@@ -438,15 +430,15 @@
 		((addr & 0xff) << ADR_POS);
 
 	/* Set write data */
-	pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
-	pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 
 	/* Ignore errors as they will be dealt with if the data link is down */
 	phy_wait_for_ack(pcie);
 
 	/* Clear command */
-	pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
-	pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 
 	/* Ignore errors as they will be dealt with if the data link is down */
 	phy_wait_for_ack(pcie);
@@ -457,7 +449,7 @@
 	unsigned int timeout = 10;
 
 	while (timeout--) {
-		if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
 			return 0;
 
 		msleep(5);
@@ -471,17 +463,17 @@
 	int err;
 
 	/* Begin initialization */
-	pci_write_reg(pcie, 0, PCIETCTLR);
+	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 
 	/* Set mode */
-	pci_write_reg(pcie, 1, PCIEMSR);
+	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 
 	/*
 	 * Initial header for port config space is type 1, set the device
 	 * class to match. Hardware takes care of propagating the IDSETR
 	 * settings, so there is no need to bother with a quirk.
 	 */
-	pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 
 	/*
 	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
@@ -491,33 +483,31 @@
 	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 
 	/* Initialize default capabilities. */
-	rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP);
+	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
 		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
 		PCI_HEADER_TYPE_BRIDGE);
 
 	/* Enable data link layer active state reporting */
-	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC);
+	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+		PCI_EXP_LNKCAP_DLLLARC);
 
 	/* Write out the physical slot number = 0 */
 	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 
 	/* Set the completion timer timeout to the maximum 50ms. */
-	rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50);
+	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 
 	/* Terminate list of capabilities (Next Capability Offset=0) */
-	rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0);
-
-	/* Enable MAC data scrambling. */
-	rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0);
+	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 
 	/* Enable MSI */
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
+		rcar_pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
 
 	/* Finish initialization - establish a PCI Express link */
-	pci_write_reg(pcie, CFINIT, PCIETCTLR);
+	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 
 	/* This will timeout if we don't have a link. */
 	err = rcar_pcie_wait_for_dl(pcie);
@@ -527,11 +517,6 @@
 	/* Enable INTx interrupts */
 	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 
-	/* Enable slave Bus Mastering */
-	rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK,
-		PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-		PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST);
-
 	wmb();
 
 	return 0;
@@ -560,7 +545,7 @@
 	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 
 	while (timeout--) {
-		if (pci_read_reg(pcie, H1_PCIEPHYSR))
+		if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
 			return rcar_pcie_hw_init(pcie);
 
 		msleep(5);
@@ -599,7 +584,7 @@
 	struct rcar_msi *msi = &pcie->msi;
 	unsigned long reg;
 
-	reg = pci_read_reg(pcie, PCIEMSIFR);
+	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 
 	/* MSI & INTx share an interrupt - we only handle MSI here */
 	if (!reg)
@@ -610,7 +595,7 @@
 		unsigned int irq;
 
 		/* clear the interrupt */
-		pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
 
 		irq = irq_find_mapping(msi->domain, index);
 		if (irq) {
@@ -624,7 +609,7 @@
 		}
 
 		/* see if there's any more pending in this vector */
-		reg = pci_read_reg(pcie, PCIEMSIFR);
+		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 	}
 
 	return IRQ_HANDLED;
@@ -651,8 +636,8 @@
 
 	irq_set_msi_desc(irq, desc);
 
-	msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
-	msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR);
+	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 	msg.data = hwirq;
 
 	write_msi_msg(irq, &msg);
@@ -729,11 +714,11 @@
 	msi->pages = __get_free_pages(GFP_KERNEL, 0);
 	base = virt_to_phys((void *)msi->pages);
 
-	pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
-	pci_write_reg(pcie, 0, PCIEMSIAUR);
+	rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+	rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
 
 	/* enable all MSI interrupts */
-	pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
 
 	return 0;
 
@@ -826,6 +811,7 @@
 	if (cpu_addr > 0) {
 		unsigned long nr_zeros = __ffs64(cpu_addr);
 		u64 alignment = 1ULL << nr_zeros;
+
 		size = min(range->size, alignment);
 	} else {
 		size = range->size;
@@ -841,13 +827,13 @@
 		 * Set up 64-bit inbound regions as the range parser doesn't
 		 * distinguish between 32 and 64-bit types.
 		 */
-		pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
-		pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
-		pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
 
-		pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
-		pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
-		pci_write_reg(pcie, 0, PCIELAMR(idx+1));
+		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
+		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
 
 		pci_addr += size;
 		cpu_addr += size;
@@ -952,7 +938,7 @@
 		of_pci_range_to_resource(&range, pdev->dev.of_node,
 						&pcie->res[win++]);
 
-		if (win > PCI_MAX_RESOURCES)
+		if (win > RCAR_PCI_MAX_RESOURCES)
 			break;
 	}
 
@@ -982,7 +968,7 @@
 		return 0;
 	}
 
-	data = pci_read_reg(pcie, MACSR);
+	data = rcar_pci_read_reg(pcie, MACSR);
 	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 
 	rcar_pcie_enable(pcie);
@@ -1003,4 +989,4 @@
 
 MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
 MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 4a392c4..d81648f 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -216,8 +216,7 @@
 
 void cpqhp_remove_debugfs_files(struct controller *ctrl)
 {
-	if (ctrl->dentry)
-		debugfs_remove(ctrl->dentry);
+	debugfs_remove(ctrl->dentry);
 	ctrl->dentry = NULL;
 }
 
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 8e9012d..9e5a9fb 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -92,9 +92,10 @@
 	struct slot *slot;
 	wait_queue_head_t queue;	/* sleep & wake process */
 	u32 slot_cap;
+	u32 slot_ctrl;
 	struct timer_list poll_timer;
+	unsigned long cmd_started;	/* jiffies */
 	unsigned int cmd_busy:1;
-	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
 	unsigned int notification_enabled:1;
 	unsigned int power_fault_detected;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index a2297db..07aa722 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -255,6 +255,13 @@
 	else if (pciehp_acpi_slot_detection_check(dev->port))
 		goto err_out_none;
 
+	if (!dev->port->subordinate) {
+		/* Can happen if we run out of bus numbers during probe */
+		dev_err(&dev->device,
+			"Hotplug bridge without secondary bus, ignoring\n");
+		goto err_out_none;
+	}
+
 	ctrl = pcie_init(dev);
 	if (!ctrl) {
 		dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 42914e0..9da84b8 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -104,11 +104,10 @@
 		free_irq(ctrl->pcie->irq, ctrl);
 }
 
-static int pcie_poll_cmd(struct controller *ctrl)
+static int pcie_poll_cmd(struct controller *ctrl, int timeout)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	u16 slot_status;
-	int timeout = 1000;
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 	if (slot_status & PCI_EXP_SLTSTA_CC) {
@@ -129,18 +128,52 @@
 	return 0;	/* timeout */
 }
 
-static void pcie_wait_cmd(struct controller *ctrl, int poll)
+static void pcie_wait_cmd(struct controller *ctrl)
 {
 	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
-	unsigned long timeout = msecs_to_jiffies(msecs);
+	unsigned long duration = msecs_to_jiffies(msecs);
+	unsigned long cmd_timeout = ctrl->cmd_started + duration;
+	unsigned long now, timeout;
 	int rc;
 
-	if (poll)
-		rc = pcie_poll_cmd(ctrl);
+	/*
+	 * If the controller does not generate notifications for command
+	 * completions, we never need to wait between writes.
+	 */
+	if (NO_CMD_CMPL(ctrl))
+		return;
+
+	if (!ctrl->cmd_busy)
+		return;
+
+	/*
+	 * Even if the command has already timed out, we want to call
+	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
+	 */
+	now = jiffies;
+	if (time_before_eq(cmd_timeout, now))
+		timeout = 1;
 	else
+		timeout = cmd_timeout - now;
+
+	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
+	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
 		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
+	else
+		rc = pcie_poll_cmd(ctrl, timeout);
+
+	/*
+	 * Controllers with errata like Intel CF118 don't generate
+	 * completion notifications unless the power/indicator/interlock
+	 * control bits are changed.  On such controllers, we'll emit this
+	 * timeout message when we wait for completion of commands that
+	 * don't change those bits, e.g., commands that merely enable
+	 * interrupts.
+	 */
 	if (!rc)
-		ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
+		ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n",
+			  ctrl->slot_ctrl,
+			  jiffies_to_msecs(now - ctrl->cmd_started));
 }
 
 /**
@@ -152,34 +185,12 @@
 static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
-	u16 slot_status;
 	u16 slot_ctrl;
 
 	mutex_lock(&ctrl->ctrl_lock);
 
-	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-	if (slot_status & PCI_EXP_SLTSTA_CC) {
-		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
-					   PCI_EXP_SLTSTA_CC);
-		if (!ctrl->no_cmd_complete) {
-			/*
-			 * After 1 sec and CMD_COMPLETED still not set, just
-			 * proceed forward to issue the next command according
-			 * to spec. Just print out the error message.
-			 */
-			ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
-		} else if (!NO_CMD_CMPL(ctrl)) {
-			/*
-			 * This controller seems to notify of command completed
-			 * event even though it supports none of power
-			 * controller, attention led, power led and EMI.
-			 */
-			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n");
-			ctrl->no_cmd_complete = 0;
-		} else {
-			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n");
-		}
-	}
+	/* Wait for any previous command that might still be in progress */
+	pcie_wait_cmd(ctrl);
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 	slot_ctrl &= ~mask;
@@ -187,22 +198,9 @@
 	ctrl->cmd_busy = 1;
 	smp_mb();
 	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
+	ctrl->cmd_started = jiffies;
+	ctrl->slot_ctrl = slot_ctrl;
 
-	/*
-	 * Wait for command completion.
-	 */
-	if (!ctrl->no_cmd_complete) {
-		int poll = 0;
-		/*
-		 * if hotplug interrupt is not enabled or command
-		 * completed interrupt is not enabled, we need to poll
-		 * command completed event.
-		 */
-		if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
-		    !(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
-			poll = 1;
-		pcie_wait_cmd(ctrl, poll);
-	}
 	mutex_unlock(&ctrl->ctrl_lock);
 }
 
@@ -773,15 +771,6 @@
 	mutex_init(&ctrl->ctrl_lock);
 	init_waitqueue_head(&ctrl->queue);
 	dbg_ctrl(ctrl);
-	/*
-	 * Controller doesn't notify of command completion if the "No
-	 * Command Completed Support" bit is set in Slot Capability
-	 * register or the controller supports none of power
-	 * controller, attention led, power led and EMI.
-	 */
-	if (NO_CMD_CMPL(ctrl) ||
-	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
-		ctrl->no_cmd_complete = 1;
 
 	/* Check if Data Link Layer Link Active Reporting is implemented */
 	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
@@ -794,7 +783,7 @@
 	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
-		PCI_EXP_SLTSTA_CC);
+		PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
 
 	/* Disable software notification */
 	pcie_disable_notification(ctrl);
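The pciehp rework above no longer blocks at command-issue time; pcie_write_cmd() records ctrl->cmd_started and the wait is deferred until the next command needs the hardware. A sketch of the resulting behaviour for two back-to-back slot-control writes (pcie_write_cmd() is the static helper in this file; the particular commands are illustrative):

	/* Returns as soon as the write is posted; completion still pending */
	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);

	/*
	 * Blocks here instead: pcie_wait_cmd() first consumes whatever
	 * remains of the previous command's budget, computed from
	 * ctrl->cmd_started, before issuing the new command.
	 */
	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);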
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 13f3d30..5a40516 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -149,15 +149,14 @@
 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
 }
 
-static void msix_set_enable(struct pci_dev *dev, int enable)
+static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
 {
-	u16 control;
+	u16 ctrl;
 
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	if (enable)
-		control |= PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	ctrl &= ~clear;
+	ctrl |= set;
+	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
 }
 
 static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -168,16 +167,6 @@
 	return (1 << (1 << x)) - 1;
 }
 
-static inline __attribute_const__ u32 msi_capable_mask(u16 control)
-{
-	return msi_mask((control >> 1) & 7);
-}
-
-static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
-{
-	return msi_mask((control >> 4) & 7);
-}
-
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
  * mask all MSI interrupts by clearing the MSI enable bit does not work
@@ -246,7 +235,7 @@
 		msix_mask_irq(desc, flag);
 		readl(desc->mask_base);		/* Flush write to device */
 	} else {
-		unsigned offset = data->irq - desc->dev->irq;
+		unsigned offset = data->irq - desc->irq;
 		msi_mask_irq(desc, 1 << offset, flag << offset);
 	}
 }
@@ -460,7 +449,8 @@
 	arch_restore_msi_irqs(dev);
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
+	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
+		     entry->masked);
 	control &= ~PCI_MSI_FLAGS_QSIZE;
 	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
@@ -469,26 +459,22 @@
 static void __pci_restore_msix_state(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
-	u16 control;
 
 	if (!dev->msix_enabled)
 		return;
 	BUG_ON(list_empty(&dev->msi_list));
-	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
-	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, 0,
+				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		msix_mask_irq(entry, entry->masked);
 	}
 
-	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
 
 void pci_restore_msi_state(struct pci_dev *dev)
@@ -501,7 +487,6 @@
 static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct msi_desc *entry;
 	unsigned long irq;
 	int retval;
@@ -510,12 +495,11 @@
 	if (retval)
 		return retval;
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
-		if (entry->irq == irq) {
-			return sprintf(buf, "%s\n",
-				       entry->msi_attrib.is_msix ? "msix" : "msi");
-		}
-	}
+	entry = irq_get_msi_desc(irq);
+	if (entry)
+		return sprintf(buf, "%s\n",
+				entry->msi_attrib.is_msix ? "msix" : "msi");
+
 	return -ENODEV;
 }
 
@@ -594,6 +578,38 @@
 	return ret;
 }
 
+static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
+{
+	u16 control;
+	struct msi_desc *entry;
+
+	/* MSI Entry Initialization */
+	entry = alloc_msi_entry(dev);
+	if (!entry)
+		return NULL;
+
+	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+	entry->msi_attrib.is_msix	= 0;
+	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
+	entry->msi_attrib.entry_nr	= 0;
+	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
+	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
+	entry->msi_attrib.pos		= dev->msi_cap;
+	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
+
+	if (control & PCI_MSI_FLAGS_64BIT)
+		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+	else
+		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+
+	/* Save the initial mask status */
+	if (entry->msi_attrib.maskbit)
+		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+
+	return entry;
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -609,32 +625,16 @@
 {
 	struct msi_desc *entry;
 	int ret;
-	u16 control;
 	unsigned mask;
 
 	msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-	/* MSI Entry Initialization */
-	entry = alloc_msi_entry(dev);
+	entry = msi_setup_entry(dev);
 	if (!entry)
 		return -ENOMEM;
 
-	entry->msi_attrib.is_msix	= 0;
-	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
-	entry->msi_attrib.entry_nr	= 0;
-	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
-	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
-	entry->msi_attrib.pos		= dev->msi_cap;
-
-	if (control & PCI_MSI_FLAGS_64BIT)
-		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
-	else
-		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 	/* All MSIs are unmasked by default, Mask them all */
-	if (entry->msi_attrib.maskbit)
-		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
-	mask = msi_capable_mask(control);
+	mask = msi_mask(entry->msi_attrib.multi_cap);
 	msi_mask_irq(entry, mask, mask);
 
 	list_add_tail(&entry->list, &dev->msi_list);
@@ -743,12 +743,10 @@
 	u16 control;
 	void __iomem *base;
 
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-
 	/* Ensure MSI-X is disabled while it is set up */
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 	/* Request & Map MSI-X table region */
 	base = msix_map_region(dev, msix_table_size(control));
 	if (!base)
@@ -767,8 +765,8 @@
 	 * MSI-X registers.  We need to mask all the vectors to prevent
 	 * interrupts coming in before they're fully set up.
 	 */
-	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, 0,
+				PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
 
 	msix_program_entries(dev, entries);
 
@@ -780,8 +778,7 @@
 	pci_intx_for_msi(dev, 0);
 	dev->msix_enabled = 1;
 
-	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
 	return 0;
 
@@ -882,7 +879,6 @@
 {
 	struct msi_desc *desc;
 	u32 mask;
-	u16 ctrl;
 
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
@@ -895,8 +891,7 @@
 	dev->msi_enabled = 0;
 
 	/* Return the device with MSI unmasked as initial states */
-	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
-	mask = msi_capable_mask(ctrl);
+	mask = msi_mask(desc->msi_attrib.multi_cap);
 	/* Keep cached state to be restored */
 	arch_msi_mask_irq(desc, mask, ~mask);
 
@@ -1001,7 +996,7 @@
 		arch_msix_mask_irq(entry, 1);
 	}
 
-	msix_set_enable(dev, 0);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
 }
@@ -1016,24 +1011,6 @@
 }
 EXPORT_SYMBOL(pci_disable_msix);
 
-/**
- * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
- * @dev: pointer to the pci_dev data structure of MSI(X) device function
- *
- * Being called during hotplug remove, from which the device function
- * is hot-removed. All previous assigned MSI/MSI-X irqs, if
- * allocated for this device function, are reclaimed to unused state,
- * which may be used later on.
- **/
-void msi_remove_pci_irq_vectors(struct pci_dev *dev)
-{
-	if (!pci_msi_enable || !dev)
-		return;
-
-	if (dev->msi_enabled || dev->msix_enabled)
-		free_msi_irqs(dev);
-}
-
 void pci_no_msi(void)
 {
 	pci_msi_enable = 0;
@@ -1065,7 +1042,7 @@
 
 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
 	if (dev->msix_cap)
-		msix_set_enable(dev, 0);
+		msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 }
 
 /**
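The msix_clear_and_set_ctrl() helper introduced above folds several open-coded read-modify-write sequences on the MSI-X Message Control register into one call. The three patterns this file now uses, gathered for reference (semantics identical to the hunks above):

	/* Disable MSI-X while (re)configuring the table */
	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

	/* Enable it with every vector masked so nothing fires early */
	msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	/* Drop the function-wide mask once the entries are programmed */
	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);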
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a3fbe20..2ab1b47 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -161,8 +161,8 @@
 static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
 {
 	int len;
-	len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
-			      obj->string.length,
+	len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
+			      obj->buffer.length,
 			      UTF16_LITTLE_ENDIAN,
 			      buf, PAGE_SIZE);
 	buf[len] = '\n';
@@ -187,16 +187,22 @@
 	tmp = obj->package.elements;
 	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
 	    tmp[0].type == ACPI_TYPE_INTEGER &&
-	    tmp[1].type == ACPI_TYPE_STRING) {
+	    (tmp[1].type == ACPI_TYPE_STRING ||
+	     tmp[1].type == ACPI_TYPE_BUFFER)) {
 		/*
 		 * The second string element is optional even when
 		 * this _DSM is implemented; when not implemented,
 		 * this entry must return a null string.
 		 */
-		if (attr == ACPI_ATTR_INDEX_SHOW)
+		if (attr == ACPI_ATTR_INDEX_SHOW) {
 			scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
-		else if (attr == ACPI_ATTR_LABEL_SHOW)
-			dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+		} else if (attr == ACPI_ATTR_LABEL_SHOW) {
+			if (tmp[1].type == ACPI_TYPE_STRING)
+				scnprintf(buf, PAGE_SIZE, "%s\n",
+					  tmp[1].string.pointer);
+			else if (tmp[1].type == ACPI_TYPE_BUFFER)
+				dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+		}
 		len = strlen(buf) > 0 ? strlen(buf) : -1;
 	}
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1c8592b..2c9ac70 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -839,12 +839,6 @@
 
 	if (!__pci_complete_power_transition(dev, state))
 		error = 0;
-	/*
-	 * When aspm_policy is "powersave" this call ensures
-	 * that ASPM is configured.
-	 */
-	if (!error && dev->bus->self)
-		pcie_aspm_powersave_config_link(dev->bus->self);
 
 	return error;
 }
@@ -1195,12 +1189,18 @@
 static int do_pci_enable_device(struct pci_dev *dev, int bars)
 {
 	int err;
+	struct pci_dev *bridge;
 	u16 cmd;
 	u8 pin;
 
 	err = pci_set_power_state(dev, PCI_D0);
 	if (err < 0 && err != -EIO)
 		return err;
+
+	bridge = pci_upstream_bridge(dev);
+	if (bridge)
+		pcie_aspm_powersave_config_link(bridge);
+
 	err = pcibios_enable_device(dev, bars);
 	if (err < 0)
 		return err;
@@ -3198,7 +3198,7 @@
 	return 0;
 }
 
-void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+void pci_reset_secondary_bus(struct pci_dev *dev)
 {
 	u16 ctrl;
 
@@ -3224,6 +3224,11 @@
 	ssleep(1);
 }
 
+void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+{
+	pci_reset_secondary_bus(dev);
+}
+
 /**
  * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
  * @dev: Bridge device
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 80887ea..2ccc9b9 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -203,10 +203,6 @@
 	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
 		return -ENODEV;
 
-	if (!dev->irq && dev->pin) {
-		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n",
-			 dev->vendor, dev->device);
-	}
 	status = pcie_port_device_register(dev);
 	if (status)
 		return status;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d0f6926..ad56682 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3405,6 +3405,8 @@
 DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
 /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
 DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
+DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
 static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
 {
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a5a63ec..6373985 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -925,7 +925,7 @@
 {
 	struct pci_dev *dev;
 	resource_size_t min_align, align, size, size0, size1;
-	resource_size_t aligns[14];	/* Alignments from 1Mb to 8Gb */
+	resource_size_t aligns[18];	/* Alignments from 1Mb to 128Gb */
 	int order, max_order;
 	struct resource *b_res = find_free_bus_resource(bus,
 					mask | IORESOURCE_PREFETCH, type);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index caed1ce..b7c3a5e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -166,11 +166,10 @@
 {
 	struct resource *root, *conflict;
 	resource_size_t fw_addr, start, end;
-	int ret = 0;
 
 	fw_addr = pcibios_retrieve_fw_addr(dev, resno);
 	if (!fw_addr)
-		return 1;
+		return -ENOMEM;
 
 	start = res->start;
 	end = res->end;
@@ -189,14 +188,13 @@
 		 resno, res);
 	conflict = request_resource_conflict(root, res);
 	if (conflict) {
-		dev_info(&dev->dev,
-			 "BAR %d: %pR conflicts with %s %pR\n", resno,
-			 res, conflict->name, conflict);
+		dev_info(&dev->dev, "BAR %d: %pR conflicts with %s %pR\n",
+			 resno, res, conflict->name, conflict);
 		res->start = start;
 		res->end = end;
-		ret = 1;
+		return -EBUSY;
 	}
-	return ret;
+	return 0;
 }
 
 static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
@@ -250,10 +248,8 @@
 static int _pci_assign_resource(struct pci_dev *dev, int resno,
 				resource_size_t size, resource_size_t min_align)
 {
-	struct resource *res = dev->resource + resno;
 	struct pci_bus *bus;
 	int ret;
-	char *type;
 
 	bus = dev->bus;
 	while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
@@ -262,21 +258,6 @@
 		bus = bus->parent;
 	}
 
-	if (ret) {
-		if (res->flags & IORESOURCE_MEM)
-			if (res->flags & IORESOURCE_PREFETCH)
-				type = "mem pref";
-			else
-				type = "mem";
-		else if (res->flags & IORESOURCE_IO)
-			type = "io";
-		else
-			type = "unknown";
-		dev_info(&dev->dev,
-			 "BAR %d: can't assign %s (size %#llx)\n",
-			 resno, type, (unsigned long long) resource_size(res));
-	}
-
 	return ret;
 }
 
@@ -302,17 +283,24 @@
 	 * where firmware left it.  That at least has a chance of
 	 * working, which is better than just leaving it disabled.
 	 */
-	if (ret < 0)
+	if (ret < 0) {
+		dev_info(&dev->dev, "BAR %d: no space for %pR\n", resno, res);
 		ret = pci_revert_fw_address(res, dev, resno, size);
-
-	if (!ret) {
-		res->flags &= ~IORESOURCE_UNSET;
-		res->flags &= ~IORESOURCE_STARTALIGN;
-		dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
-		if (resno < PCI_BRIDGE_RESOURCES)
-			pci_update_resource(dev, resno);
 	}
-	return ret;
+
+	if (ret < 0) {
+		dev_info(&dev->dev, "BAR %d: failed to assign %pR\n", resno,
+			 res);
+		return ret;
+	}
+
+	res->flags &= ~IORESOURCE_UNSET;
+	res->flags &= ~IORESOURCE_STARTALIGN;
+	dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+	if (resno < PCI_BRIDGE_RESOURCES)
+		pci_update_resource(dev, resno);
+
+	return 0;
 }
 EXPORT_SYMBOL(pci_assign_resource);
 
@@ -320,9 +308,11 @@
 			resource_size_t min_align)
 {
 	struct resource *res = dev->resource + resno;
+	unsigned long flags;
 	resource_size_t new_size;
 	int ret;
 
+	flags = res->flags;
 	res->flags |= IORESOURCE_UNSET;
 	if (!res->parent) {
 		dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
@@ -333,14 +323,21 @@
 	/* already aligned with min_align */
 	new_size = resource_size(res) + addsize;
 	ret = _pci_assign_resource(dev, resno, new_size, min_align);
-	if (!ret) {
-		res->flags &= ~IORESOURCE_UNSET;
-		res->flags &= ~IORESOURCE_STARTALIGN;
-		dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
-		if (resno < PCI_BRIDGE_RESOURCES)
-			pci_update_resource(dev, resno);
+	if (ret) {
+		res->flags = flags;
+		dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n",
+			 resno, res, (unsigned long long) addsize);
+		return ret;
 	}
-	return ret;
+
+	res->flags &= ~IORESOURCE_UNSET;
+	res->flags &= ~IORESOURCE_STARTALIGN;
+	dev_info(&dev->dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
+		 resno, res, (unsigned long long) addsize);
+	if (resno < PCI_BRIDGE_RESOURCES)
+		pci_update_resource(dev, resno);
+
+	return 0;
 }
 
 int pci_enable_resources(struct pci_dev *dev, int mask)
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b81448b..a5c6cb7 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -319,8 +319,7 @@
 	struct pnp_dev *pnp = _pnp;
 
 	/* true means it matched */
-	return !acpi->physical_node_count
-	    && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
+	return pnp->data == acpi;
 }
 
 static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f..44341dc 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@
 			"desc %p not ACKed\n", tx_desc);
 	}
 
+	if (ret == NULL) {
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"%s: unable to obtain tx descriptor\n", __func__);
+		goto err_out;
+	}
+
 	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
 	if (i == bdma_chan->bd_num - 1) {
 		i = 0;
@@ -297,7 +303,7 @@
 	tx_desc->txd.phys = bdma_chan->bd_phys +
 				i * sizeof(struct tsi721_dma_desc);
 	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
 	spin_unlock_bh(&bdma_chan->lock);
 
 	return ret;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f7e3163..3f50dfc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -733,6 +733,14 @@
 			scsi_next_command(cmd);
 			return;
 		}
+	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+		/*
+		 * Certain non-BLOCK_PC requests are commands that don't
+		 * actually transfer anything (e.g. FLUSH), so we cannot use
+		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
+		 * This sets the error explicitly for the problem case.
+		 */
+		error = __scsi_error_from_host_byte(cmd, result);
 	}
 
 	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 2bea4f0..503594e 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -28,7 +28,7 @@
 static const char *
 scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	sector_t lba = 0, txlen = 0;
 
 	lba |= ((cdb[1] & 0x1F) << 16);
@@ -46,7 +46,7 @@
 static const char *
 scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	sector_t lba = 0, txlen = 0;
 
 	lba |= (cdb[2] << 24);
@@ -71,7 +71,7 @@
 static const char *
 scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	sector_t lba = 0, txlen = 0;
 
 	lba |= (cdb[2] << 24);
@@ -94,7 +94,7 @@
 static const char *
 scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	sector_t lba = 0, txlen = 0;
 
 	lba |= ((u64)cdb[2] << 56);
@@ -125,7 +125,7 @@
 static const char *
 scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len, *cmd;
+	const char *ret = trace_seq_buffer_ptr(p), *cmd;
 	sector_t lba = 0, txlen = 0;
 	u32 ei_lbrt = 0;
 
@@ -180,7 +180,7 @@
 static const char *
 scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	unsigned int regions = cdb[7] << 8 | cdb[8];
 
 	trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
@@ -192,7 +192,7 @@
 static const char *
 scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len, *cmd;
+	const char *ret = trace_seq_buffer_ptr(p), *cmd;
 	sector_t lba = 0;
 	u32 alloc_len = 0;
 
@@ -247,7 +247,7 @@
 static const char *
 scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	trace_seq_printf(p, "-");
 	trace_seq_putc(p, 0);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d4f9670..22aa41c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/clk/clk-conf.h>
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
@@ -259,6 +260,10 @@
 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 	int ret;
 
+	ret = of_clk_set_defaults(dev->of_node, false);
+	if (ret)
+		return ret;
+
 	acpi_dev_pm_attach(dev, true);
 	ret = sdrv->probe(to_spi_device(dev));
 	if (ret)
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 8b25c1a..ebb19b2 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -530,8 +530,10 @@
 	pwrpriv->bkeepfwalive = false;
 
 	DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
-	if (pm_netdev_open23a(pnetdev, true) != 0)
+	if (pm_netdev_open23a(pnetdev, true) != 0) {
+		up(&pwrpriv->lock);
 		goto exit;
+	}
 
 	netif_device_attach(pnetdev);
 	netif_carrier_on(pnetdev);
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 59679cd..69b80e8 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -981,7 +981,7 @@
 		pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
 	}
 
-	{
+	if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
 		pDevice->byReAssocCount++;
 		/* 10 sec timeout */
 		if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1d3908d..5a5fd93 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -2318,6 +2318,7 @@
 	int             handled = 0;
 	unsigned char byData = 0;
 	int             ii = 0;
+	unsigned long flags;
 
 	MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
 
@@ -2331,7 +2332,8 @@
 
 	handled = 1;
 	MACvIntDisable(pDevice->PortOffset);
-	spin_lock_irq(&pDevice->lock);
+
+	spin_lock_irqsave(&pDevice->lock, flags);
 
 	//Make sure current page is 0
 	VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2560,7 +2562,8 @@
 	if (byOrgPageSel == 1)
 		MACvSelectPage1(pDevice->PortOffset);
 
-	spin_unlock_irq(&pDevice->lock);
+	spin_unlock_irqrestore(&pDevice->lock, flags);
+
 	MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
 
 	return IRQ_RETVAL(handled);
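The spin_lock_irqsave() conversion above matters because spin_unlock_irq() unconditionally re-enables local interrupts, which is wrong if the handler can be entered with them already disabled. The safe pattern for a lock taken from interrupt context, as a generic sketch:

	unsigned long flags;

	spin_lock_irqsave(&pDevice->lock, flags);	/* saves IRQ state */
	/* critical section */
	spin_unlock_irqrestore(&pDevice->lock, flags);	/* restores it exactly */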
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c036595..fddfae6 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -825,7 +825,7 @@
 
 	ret = core_dev_export(dev, tpg, lun);
 	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
+		percpu_ref_exit(&lun->lun_ref);
 		return ret;
 	}
 
@@ -880,5 +880,7 @@
 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 	spin_unlock(&tpg->tpg_lun_lock);
 
+	percpu_ref_exit(&lun->lun_ref);
+
 	return 0;
 }
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ae9618f..982f6ab 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -19,8 +19,6 @@
 
 static bool request_mem_succeeded = false;
 
-static struct pci_dev *default_vga;
-
 static struct fb_var_screeninfo efifb_defined = {
 	.activate		= FB_ACTIVATE_NOW,
 	.height			= -1,
@@ -84,23 +82,10 @@
 	.fb_imageblit	= cfb_imageblit,
 };
 
-struct pci_dev *vga_default_device(void)
-{
-	return default_vga;
-}
-
-EXPORT_SYMBOL_GPL(vga_default_device);
-
-void vga_set_default_device(struct pci_dev *pdev)
-{
-	default_vga = pdev;
-}
-
 static int efifb_setup(char *options)
 {
 	char *this_opt;
 	int i;
-	struct pci_dev *dev = NULL;
 
 	if (options && *options) {
 		while ((this_opt = strsep(&options, ",")) != NULL) {
@@ -126,30 +111,6 @@
 		}
 	}
 
-	for_each_pci_dev(dev) {
-		int i;
-
-		if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-			continue;
-
-		for (i=0; i < DEVICE_COUNT_RESOURCE; i++) {
-			resource_size_t start, end;
-
-			if (!(pci_resource_flags(dev, i) & IORESOURCE_MEM))
-				continue;
-
-			start = pci_resource_start(dev, i);
-			end  = pci_resource_end(dev, i);
-
-			if (!start || !end)
-				continue;
-
-			if (screen_info.lfb_base >= start &&
-			    (screen_info.lfb_base + screen_info.lfb_size) < end)
-				default_vga = dev;
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5d4de88..eeba754 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@
 int gnttab_init(void)
 {
 	int i;
+	unsigned long max_nr_grant_frames;
 	unsigned int max_nr_glist_frames, nr_glist_frames;
 	unsigned int nr_init_grefs;
 	int ret;
 
 	gnttab_request_version();
+	max_nr_grant_frames = gnttab_max_grant_frames();
 	nr_grant_frames = 1;
 
 	/* Determine the maximum number of frames required for the
 	 * grant reference free list on the current hypervisor.
 	 */
 	BUG_ON(grefs_per_grant_frame == 0);
-	max_nr_glist_frames = (gnttab_max_grant_frames() *
+	max_nr_glist_frames = (max_nr_grant_frames *
 			       grefs_per_grant_frame / RPP);
 
 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@
 		}
 	}
 
+	ret = arch_gnttab_init(max_nr_grant_frames,
+			       nr_status_frames(max_nr_grant_frames));
+	if (ret < 0)
+		goto ini_nomem;
+
 	if (gnttab_setup() < 0) {
 		ret = -ENODEV;
 		goto ini_nomem;
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index 6f3fd99..83eeddd 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -46,13 +46,13 @@
 #include "devlist.h"
 
 static struct zorro_manuf_info __initdata zorro_manuf_list[] = {
-#define MANUF( manuf, name )		{ 0x##manuf, sizeof(__prods_##manuf) / sizeof(struct zorro_prod_info), __manufstr_##manuf, __prods_##manuf },
+#define MANUF( manuf, name )		{ 0x##manuf, ARRAY_SIZE(__prods_##manuf), __manufstr_##manuf, __prods_##manuf },
 #define ENDMANUF()
 #define PRODUCT( manuf, prod, name )
 #include "devlist.h"
 };
 
-#define MANUFS (sizeof(zorro_manuf_list)/sizeof(struct zorro_manuf_info))
+#define MANUFS ARRAY_SIZE(zorro_manuf_list)
 
 void __init zorro_name_device(struct zorro_dev *dev)
 {
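ARRAY_SIZE() above replaces the sizeof()/sizeof() idiom; unlike the open-coded form, the kernel macro also refuses to compile when handed a pointer rather than a true array. Its core definition, for reference (the kernel version in <linux/kernel.h> adds a __must_be_array() check on top):

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))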
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 42dd2e4..35de0c0 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -55,13 +55,13 @@
 	afs_uuid.time_low = uuidtime;
 	afs_uuid.time_mid = uuidtime >> 32;
 	afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
-	afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME;
+	afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
 
 	get_random_bytes(&clockseq, 2);
 	afs_uuid.clock_seq_low = clockseq;
 	afs_uuid.clock_seq_hi_and_reserved =
 		(clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
-	afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD;
+	afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
 
 	_debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
 	       afs_uuid.time_low,
diff --git a/fs/aio.c b/fs/aio.c
index 1c9c5f0..bd7ec2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -506,6 +506,8 @@
 
 	aio_free_ring(ctx);
 	free_percpu(ctx->cpu);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
@@ -715,8 +717,8 @@
 err:
 	mutex_unlock(&ctx->ring_lock);
 	free_percpu(ctx->cpu);
-	free_percpu(ctx->reqs.pcpu_count);
-	free_percpu(ctx->users.pcpu_count);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 	pr_debug("error allocating ioctx %d\n", err);
 	return ERR_PTR(err);
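The fs/aio.c hunks above stop reaching into percpu_ref internals (free_percpu() on pcpu_count) and use the new percpu_ref_exit() teardown instead. A minimal sketch of the lifecycle this enables, using a hypothetical my_ctx object; as in aio, percpu_ref_exit() runs only once the count has dropped to zero:

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_ctx {
		struct percpu_ref ref;
	};

	static void my_ctx_free(struct percpu_ref *ref)	/* release callback */
	{
		struct my_ctx *ctx = container_of(ref, struct my_ctx, ref);

		percpu_ref_exit(&ctx->ref);	/* frees the percpu counters */
		kfree(ctx);
	}

	/* setup:    percpu_ref_init(&ctx->ref, my_ctx_free)
	 * teardown: percpu_ref_kill(&ctx->ref); my_ctx_free() runs at zero
	 */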
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 194d0d1..17e39b0 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -71,7 +71,6 @@
 					   been performed at the start of a
 					   write */
 	int pages_in_io;		/* approximate total IO pages */
-	size_t	size;			/* total request size (doesn't change)*/
 	sector_t block_in_file;		/* Current offset into the underlying
 					   file in dio_block units. */
 	unsigned blocks_available;	/* At block_in_file.  changes */
@@ -1104,7 +1103,8 @@
 	unsigned blkbits = i_blkbits;
 	unsigned blocksize_mask = (1 << blkbits) - 1;
 	ssize_t retval = -EINVAL;
-	loff_t end = offset + iov_iter_count(iter);
+	size_t count = iov_iter_count(iter);
+	loff_t end = offset + count;
 	struct dio *dio;
 	struct dio_submit sdio = { 0, };
 	struct buffer_head map_bh = { 0, };
@@ -1287,10 +1287,9 @@
 	 */
 	BUG_ON(retval == -EIOCBQUEUED);
 	if (dio->is_async && retval == 0 && dio->result &&
-	    ((rw == READ) || (dio->result == sdio.size)))
+	    (rw == READ || dio->result == count))
 		retval = -EIOCBQUEUED;
-
-	if (retval != -EIOCBQUEUED)
+	else
 		dio_await_completion(dio);
 
 	if (drop_refcount(dio) == 0) {
diff --git a/fs/locks.c b/fs/locks.c
index 717fbc4..a6f5480 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -325,7 +325,7 @@
 		return -ENOMEM;
 
 	fl->fl_file = filp;
-	fl->fl_owner = (fl_owner_t)filp;
+	fl->fl_owner = filp;
 	fl->fl_pid = current->tgid;
 	fl->fl_flags = FL_FLOCK;
 	fl->fl_type = type;
@@ -431,7 +431,7 @@
 	if (assign_type(fl, type) != 0)
 		return -EINVAL;
 
-	fl->fl_owner = (fl_owner_t)current->files;
+	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
 
 	fl->fl_file = filp;
@@ -1155,7 +1155,6 @@
 int locks_mandatory_locked(struct file *file)
 {
 	struct inode *inode = file_inode(file);
-	fl_owner_t owner = current->files;
 	struct file_lock *fl;
 
 	/*
@@ -1165,7 +1164,8 @@
 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
 		if (!IS_POSIX(fl))
 			continue;
-		if (fl->fl_owner != owner && fl->fl_owner != (fl_owner_t)file)
+		if (fl->fl_owner != current->files &&
+		    fl->fl_owner != file)
 			break;
 	}
 	spin_unlock(&inode->i_lock);
@@ -1205,7 +1205,7 @@
 
 	for (;;) {
 		if (filp) {
-			fl.fl_owner = (fl_owner_t)filp;
+			fl.fl_owner = filp;
 			fl.fl_flags &= ~FL_SLEEP;
 			error = __posix_lock_file(inode, &fl, NULL);
 			if (!error)
@@ -1948,7 +1948,7 @@
 
 		cmd = F_GETLK;
 		file_lock.fl_flags |= FL_OFDLCK;
-		file_lock.fl_owner = (fl_owner_t)filp;
+		file_lock.fl_owner = filp;
 	}
 
 	error = vfs_test_lock(filp, &file_lock);
@@ -2103,7 +2103,7 @@
 
 		cmd = F_SETLK;
 		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = (fl_owner_t)filp;
+		file_lock->fl_owner = filp;
 		break;
 	case F_OFD_SETLKW:
 		error = -EINVAL;
@@ -2112,7 +2112,7 @@
 
 		cmd = F_SETLKW;
 		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = (fl_owner_t)filp;
+		file_lock->fl_owner = filp;
 		/* Fallthrough */
 	case F_SETLKW:
 		file_lock->fl_flags |= FL_SLEEP;
@@ -2170,7 +2170,7 @@
 
 		cmd = F_GETLK64;
 		file_lock.fl_flags |= FL_OFDLCK;
-		file_lock.fl_owner = (fl_owner_t)filp;
+		file_lock.fl_owner = filp;
 	}
 
 	error = vfs_test_lock(filp, &file_lock);
@@ -2242,7 +2242,7 @@
 
 		cmd = F_SETLK64;
 		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = (fl_owner_t)filp;
+		file_lock->fl_owner = filp;
 		break;
 	case F_OFD_SETLKW:
 		error = -EINVAL;
@@ -2251,7 +2251,7 @@
 
 		cmd = F_SETLKW64;
 		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = (fl_owner_t)filp;
+		file_lock->fl_owner = filp;
 		/* Fallthrough */
 	case F_SETLKW64:
 		file_lock->fl_flags |= FL_SLEEP;
@@ -2324,11 +2324,11 @@
 	if (!inode->i_flock)
 		return;
 
-	locks_remove_posix(filp, (fl_owner_t)filp);
+	locks_remove_posix(filp, filp);
 
 	if (filp->f_op->flock) {
 		struct file_lock fl = {
-			.fl_owner = (fl_owner_t)filp,
+			.fl_owner = filp,
 			.fl_pid = current->tgid,
 			.fl_file = filp,
 			.fl_flags = FL_FLOCK,
diff --git a/fs/open.c b/fs/open.c
index 36662d0..d6fd3ac 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -263,11 +263,10 @@
 		return -EPERM;
 
 	/*
-	 * We can not allow to do any fallocate operation on an active
-	 * swapfile
+	 * We cannot allow any fallocate operation on an active swapfile
 	 */
 	if (IS_SWAPFILE(inode))
-		ret = -ETXTBSY;
+		return -ETXTBSY;
 
 	/*
 	 * Revalidate the write permissions, in case security policy has
diff --git a/include/asm-generic/io-64-nonatomic-hi-lo.h b/include/asm-generic/io-64-nonatomic-hi-lo.h
index a6806a9..2e29d13 100644
--- a/include/asm-generic/io-64-nonatomic-hi-lo.h
+++ b/include/asm-generic/io-64-nonatomic-hi-lo.h
@@ -4,8 +4,7 @@
 #include <linux/io.h>
 #include <asm-generic/int-ll64.h>
 
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
+static inline __u64 hi_lo_readq(const volatile void __iomem *addr)
 {
 	const volatile u32 __iomem *p = addr;
 	u32 low, high;
@@ -15,14 +14,19 @@
 
 	return low + ((u64)high << 32);
 }
-#endif
 
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
+static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
 {
 	writel(val >> 32, addr + 4);
 	writel(val, addr);
 }
+
+#ifndef readq
+#define readq hi_lo_readq
+#endif
+
+#ifndef writeq
+#define writeq hi_lo_writeq
 #endif
 
 #endif	/* _ASM_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/asm-generic/io-64-nonatomic-lo-hi.h b/include/asm-generic/io-64-nonatomic-lo-hi.h
index ca546b1..0efacff 100644
--- a/include/asm-generic/io-64-nonatomic-lo-hi.h
+++ b/include/asm-generic/io-64-nonatomic-lo-hi.h
@@ -4,8 +4,7 @@
 #include <linux/io.h>
 #include <asm-generic/int-ll64.h>
 
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
+static inline __u64 lo_hi_readq(const volatile void __iomem *addr)
 {
 	const volatile u32 __iomem *p = addr;
 	u32 low, high;
@@ -15,14 +14,19 @@
 
 	return low + ((u64)high << 32);
 }
-#endif
 
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
+static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
 {
 	writel(val, addr);
 	writel(val >> 32, addr + 4);
 }
+
+#ifndef readq
+#define readq lo_hi_readq
+#endif
+
+#ifndef writeq
+#define writeq lo_hi_writeq
 #endif
 
 #endif	/* _ASM_IO_64_NONATOMIC_LO_HI_H_ */
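
Both headers now define the split accessors under unambiguous names (hi_lo_* and lo_hi_*) and only alias readq/writeq to them when the architecture lacks native 64-bit MMIO accessors, so a driver that depends on a specific half-word ordering can ask for it by name. A hypothetical example, assuming a device whose 64-bit counter at offset 0x10 latches its high word when the low word is read:

#include <asm-generic/io-64-nonatomic-lo-hi.h>

/* The low-then-high helper is requested explicitly, regardless of
 * whatever readq resolves to on this architecture. */
static u64 sketch_read_counter(void __iomem *base)
{
	return lo_hi_readq(base + 0x10);	/* 0x10: assumed counter offset */
}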
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 0703aa7..4d9f233 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -36,55 +36,17 @@
 #endif
 
 /*
- * Add a offset to a pointer but keep the pointer as is.
- *
- * Only S390 provides its own means of moving the pointer.
+ * Arch may define arch_raw_cpu_ptr() to provide more efficient address
+ * translations for raw_cpu_ptr().
  */
-#ifndef SHIFT_PERCPU_PTR
-/* Weird cast keeps both GCC and sparse happy. */
-#define SHIFT_PERCPU_PTR(__p, __offset)	({				\
-	__verify_pcpu_ptr((__p));					\
-	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
-})
+#ifndef arch_raw_cpu_ptr
+#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 #endif
 
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-
-#ifndef raw_cpu_ptr
-#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
-#endif
-#ifdef CONFIG_DEBUG_PREEMPT
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
-#else
-#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
-#endif
-
-#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
-
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
 #endif
 
-#else /* ! SMP */
-
-#define VERIFY_PERCPU_PTR(__p) ({			\
-	__verify_pcpu_ptr((__p));			\
-	(typeof(*(__p)) __kernel __force *)(__p);	\
-})
-
-#define per_cpu(var, cpu)	(*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
-#define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
-#define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
-#define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
-#define raw_cpu_ptr(ptr)	this_cpu_ptr(ptr)
-
 #endif	/* SMP */
 
 #ifndef PER_CPU_BASE_SECTION
@@ -95,25 +57,6 @@
 #endif
 #endif
 
-#ifdef CONFIG_SMP
-
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#endif
-#define PER_CPU_FIRST_SECTION "..first"
-
-#else
-
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
-
-#endif
-
 #ifndef PER_CPU_ATTRIBUTES
 #define PER_CPU_ATTRIBUTES
 #endif
@@ -122,7 +65,356 @@
 #define PER_CPU_DEF_ATTRIBUTES
 #endif
 
-/* Keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr raw_cpu_ptr
+#define raw_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	*raw_cpu_ptr(&(pcp)) op val;					\
+} while (0)
+
+#define raw_cpu_generic_add_return(pcp, val)				\
+({									\
+	raw_cpu_add(pcp, val);						\
+	raw_cpu_read(pcp);						\
+})
+
+#define raw_cpu_generic_xchg(pcp, nval)					\
+({									\
+	typeof(pcp) __ret;						\
+	__ret = raw_cpu_read(pcp);					\
+	raw_cpu_write(pcp, nval);					\
+	__ret;								\
+})
+
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) __ret;						\
+	__ret = raw_cpu_read(pcp);					\
+	if (__ret == (oval))						\
+		raw_cpu_write(pcp, nval);				\
+	__ret;								\
+})
+
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({									\
+	int __ret = 0;							\
+	if (raw_cpu_read(pcp1) == (oval1) &&				\
+			 raw_cpu_read(pcp2)  == (oval2)) {		\
+		raw_cpu_write(pcp1, nval1);				\
+		raw_cpu_write(pcp2, nval2);				\
+		__ret = 1;						\
+	}								\
+	(__ret);							\
+})
+
+#define this_cpu_generic_read(pcp)					\
+({									\
+	typeof(pcp) __ret;						\
+	preempt_disable();						\
+	__ret = *this_cpu_ptr(&(pcp));					\
+	preempt_enable();						\
+	__ret;								\
+})
+
+#define this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	*raw_cpu_ptr(&(pcp)) op val;					\
+	raw_local_irq_restore(__flags);					\
+} while (0)
+
+#define this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	raw_cpu_add(pcp, val);						\
+	__ret = raw_cpu_read(pcp);					\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_xchg(pcp, nval)				\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_read(pcp);					\
+	raw_cpu_write(pcp, nval);					\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_read(pcp);					\
+	if (__ret == (oval))						\
+		raw_cpu_write(pcp, nval);				\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+({									\
+	int __ret;							\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
+			oval1, oval2, nval1, nval2);			\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#ifndef raw_cpu_read_1
+#define raw_cpu_read_1(pcp)		(*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_2
+#define raw_cpu_read_2(pcp)		(*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_4
+#define raw_cpu_read_4(pcp)		(*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_8
+#define raw_cpu_read_8(pcp)		(*raw_cpu_ptr(&(pcp)))
+#endif
+
+#ifndef raw_cpu_write_1
+#define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_2
+#define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_4
+#define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_8
+#define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef raw_cpu_add_1
+#define raw_cpu_add_1(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_2
+#define raw_cpu_add_2(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_4
+#define raw_cpu_add_4(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_8
+#define raw_cpu_add_8(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef raw_cpu_and_1
+#define raw_cpu_and_1(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_2
+#define raw_cpu_and_2(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_4
+#define raw_cpu_and_4(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_8
+#define raw_cpu_and_8(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef raw_cpu_or_1
+#define raw_cpu_or_1(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_2
+#define raw_cpu_or_2(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_4
+#define raw_cpu_or_4(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_8
+#define raw_cpu_or_8(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef raw_cpu_add_return_1
+#define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_2
+#define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_4
+#define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_8
+#define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef raw_cpu_xchg_1
+#define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_2
+#define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_4
+#define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_8
+#define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef raw_cpu_cmpxchg_1
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_2
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_4
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_8
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef raw_cpu_cmpxchg_double_1
+#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_2
+#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_4
+#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_8
+#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#ifndef this_cpu_read_1
+#define this_cpu_read_1(pcp)		this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_2
+#define this_cpu_read_2(pcp)		this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_4
+#define this_cpu_read_4(pcp)		this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_8
+#define this_cpu_read_8(pcp)		this_cpu_generic_read(pcp)
+#endif
+
+#ifndef this_cpu_write_1
+#define this_cpu_write_1(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_2
+#define this_cpu_write_2(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_4
+#define this_cpu_write_4(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_8
+#define this_cpu_write_8(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef this_cpu_add_1
+#define this_cpu_add_1(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_2
+#define this_cpu_add_2(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_4
+#define this_cpu_add_4(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_8
+#define this_cpu_add_8(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef this_cpu_and_1
+#define this_cpu_and_1(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_2
+#define this_cpu_and_2(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_4
+#define this_cpu_and_4(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_8
+#define this_cpu_and_8(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef this_cpu_or_1
+#define this_cpu_or_1(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_2
+#define this_cpu_or_2(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_4
+#define this_cpu_or_4(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_8
+#define this_cpu_or_8(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef this_cpu_add_return_1
+#define this_cpu_add_return_1(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_2
+#define this_cpu_add_return_2(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_4
+#define this_cpu_add_return_4(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_8
+#define this_cpu_add_return_8(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef this_cpu_xchg_1
+#define this_cpu_xchg_1(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_2
+#define this_cpu_xchg_2(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_4
+#define this_cpu_xchg_4(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_8
+#define this_cpu_xchg_8(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_1
+#define this_cpu_cmpxchg_1(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_2
+#define this_cpu_cmpxchg_2(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_4
+#define this_cpu_cmpxchg_4(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_8
+#define this_cpu_cmpxchg_8(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_double_1
+#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_2
+#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_4
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_8
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
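
The generic fallbacks above make the contract explicit: raw_cpu_* assumes the caller has already excluded preemption and interrupts, while the this_cpu_* fallbacks wrap the same access in raw_local_irq_save()/raw_local_irq_restore(). A minimal sketch of the resulting usage rule, with a hypothetical per-cpu counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, sketch_hits);

static void sketch_count_any_context(void)
{
	/* Safe in any context: the generic fallback makes the
	 * read-modify-write irq-safe. */
	this_cpu_add(sketch_hits, 1);
}

static void sketch_count_protected(void)
{
	/* Caller already holds preemption/irqs off, so the cheaper
	 * unprotected form is sufficient. */
	raw_cpu_add(sketch_hits, 1);
}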
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0edf949..94b19be 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -75,9 +75,9 @@
 
 static inline void aead_givcrypt_set_callback(
 	struct aead_givcrypt_request *req, u32 flags,
-	crypto_completion_t complete, void *data)
+	crypto_completion_t compl, void *data)
 {
-	aead_request_set_callback(&req->areq, flags, complete, data);
+	aead_request_set_callback(&req->areq, flags, compl, data);
 }
 
 static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 016c2f1..623a59c 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -410,4 +410,10 @@
 	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
 }
 
+static inline void crypto_yield(u32 flags)
+{
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+		cond_resched();
+}
+
 #endif	/* _CRYPTO_ALGAPI_H */
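
crypto_yield() moves here from scatterwalk.h (the old copy is removed further down), so algorithm code that never walks a scatterlist can use it too. A hedged sketch of the call pattern; process_one_block() is a placeholder, not a real kernel helper:

#include <crypto/algapi.h>

static void process_one_block(struct blkcipher_desc *desc, unsigned int i)
{
	/* hypothetical per-block work */
}

static int sketch_encrypt(struct blkcipher_desc *desc, unsigned int nblocks)
{
	unsigned int i;

	for (i = 0; i < nblocks; i++) {
		process_one_block(desc, i);
		/* cond_resched() between blocks if the request set
		 * CRYPTO_TFM_REQ_MAY_SLEEP. */
		crypto_yield(desc->flags);
	}
	return 0;
}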
diff --git a/include/crypto/des.h b/include/crypto/des.h
index 2971c63..fc6274c 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -16,4 +16,7 @@
 
 extern unsigned long des_ekey(u32 *pe, const u8 *k);
 
+extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+			     unsigned int keylen);
+
 #endif /* __CRYPTO_DES_H */
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
new file mode 100644
index 0000000..831d786
--- /dev/null
+++ b/include/crypto/drbg.h
@@ -0,0 +1,290 @@
+/*
+ * DRBG based on NIST SP800-90A
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _DRBG_H
+#define _DRBG_H
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
+#include <linux/fips.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocating additional memory, the following data
+ * structure is used to point to the original memory with its size. In
+ * addition, it is used to build a linked list. The linked list defines the
+ * concatenation of individual buffers. The order of the memory blocks
+ * referenced in that linked list determines the order of concatenation.
+ */
+struct drbg_string {
+	const unsigned char *buf;
+	size_t len;
+	struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+				    const unsigned char *buf, size_t len)
+{
+	string->buf = buf;
+	string->len = len;
+	INIT_LIST_HEAD(&string->list);
+}
+
+struct drbg_state;
+typedef uint32_t drbg_flag_t;
+
+struct drbg_core {
+	drbg_flag_t flags;	/* flags for the cipher */
+	__u8 statelen;		/* maximum state length */
+	/*
+	 * maximum length of personalization string or additional input
+	 * string -- exponent for base 2
+	 */
+	__u8 max_addtllen;
+	/* maximum bits per RNG request -- exponent for base 2 */
+	__u8 max_bits;
+	/* maximum number of requests -- exponent for base 2 */
+	__u8 max_req;
+	__u8 blocklen_bytes;	/* block size of output in bytes */
+	char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
+	 /* kernel crypto API backend cipher name */
+	char backend_cra_name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct drbg_state_ops {
+	int (*update)(struct drbg_state *drbg, struct list_head *seed,
+		      int reseed);
+	int (*generate)(struct drbg_state *drbg,
+			unsigned char *buf, unsigned int buflen,
+			struct list_head *addtl);
+	int (*crypto_init)(struct drbg_state *drbg);
+	int (*crypto_fini)(struct drbg_state *drbg);
+};
+
+struct drbg_test_data {
+	struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
+};
+
+struct drbg_state {
+	spinlock_t drbg_lock;	/* lock around DRBG */
+	unsigned char *V;	/* internal state 10.1.1.1 1a) */
+	/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
+	unsigned char *C;
+	/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
+	size_t reseed_ctr;
+	 /* some memory the DRBG can use for its operation */
+	unsigned char *scratchpad;
+	void *priv_data;	/* Cipher handle */
+	bool seeded;		/* DRBG fully seeded? */
+	bool pr;		/* Prediction resistance enabled? */
+#ifdef CONFIG_CRYPTO_FIPS
+	bool fips_primed;	/* Continuous test primed? */
+	unsigned char *prev;	/* FIPS 140-2 continuous test value */
+#endif
+	const struct drbg_state_ops *d_ops;
+	const struct drbg_core *core;
+	struct drbg_test_data *test_data;
+};
+
+static inline __u8 drbg_statelen(struct drbg_state *drbg)
+{
+	if (drbg && drbg->core)
+		return drbg->core->statelen;
+	return 0;
+}
+
+static inline __u8 drbg_blocklen(struct drbg_state *drbg)
+{
+	if (drbg && drbg->core)
+		return drbg->core->blocklen_bytes;
+	return 0;
+}
+
+static inline __u8 drbg_keylen(struct drbg_state *drbg)
+{
+	if (drbg && drbg->core)
+		return (drbg->core->statelen - drbg->core->blocklen_bytes);
+	return 0;
+}
+
+static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
+{
+	/* max_bits is in bits, but buflen is in bytes */
+	return (1 << (drbg->core->max_bits - 3));
+}
+
+static inline size_t drbg_max_addtl(struct drbg_state *drbg)
+{
+	return (1UL<<(drbg->core->max_addtllen));
+}
+
+static inline size_t drbg_max_requests(struct drbg_state *drbg)
+{
+	return (1UL<<(drbg->core->max_req));
+}
+
+/*
+ * kernel crypto API input data structure for the DRBG generate operation,
+ * used when the dlen parameter of crypto_rng_get_bytes() is set to 0
+ */
+struct drbg_gen {
+	unsigned char *outbuf;	/* output buffer for random numbers */
+	unsigned int outlen;	/* size of output buffer */
+	struct drbg_string *addtl;	/* additional information string */
+	struct drbg_test_data *test_data;	/* test data */
+};
+
+/*
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_get_bytes() to allow the caller to provide additional data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string input buffer
+ *        (formatted as a struct drbg_string)
+ *
+ * return
+ *	see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
+			unsigned char *outbuf, unsigned int outlen,
+			struct drbg_string *addtl)
+{
+	int ret;
+	struct drbg_gen genbuf;
+	genbuf.outbuf = outbuf;
+	genbuf.outlen = outlen;
+	genbuf.addtl = addtl;
+	genbuf.test_data = NULL;
+	ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+	return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_get_bytes() to allow the caller to provide additional data
+ * and to furnish test_data
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string input buffer
+ *        (formatted as a struct drbg_string)
+ * @test_data filled test data
+ *
+ * return
+ *	see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
+			unsigned char *outbuf, unsigned int outlen,
+			struct drbg_string *addtl,
+			struct drbg_test_data *test_data)
+{
+	int ret;
+	struct drbg_gen genbuf;
+	genbuf.outbuf = outbuf;
+	genbuf.outlen = outlen;
+	genbuf.addtl = addtl;
+	genbuf.test_data = test_data;
+	ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+	return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_reset() to allow the caller to provide test_data
+ *
+ * @drng DRBG handle -- see crypto_rng_reset
+ * @pers personalization string input buffer
+ *       (formatted as a struct drbg_string)
+ * @test_data filled test data
+ *
+ * return
+ *	see crypto_rng_reset
+ */
+static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
+					 struct drbg_string *pers,
+					 struct drbg_test_data *test_data)
+{
+	int ret;
+	struct drbg_gen genbuf;
+	genbuf.outbuf = NULL;
+	genbuf.outlen = 0;
+	genbuf.addtl = pers;
+	genbuf.test_data = test_data;
+	ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
+	return ret;
+}
+
+/* DRBG type flags */
+#define DRBG_CTR	((drbg_flag_t)1<<0)
+#define DRBG_HMAC	((drbg_flag_t)1<<1)
+#define DRBG_HASH	((drbg_flag_t)1<<2)
+#define DRBG_TYPE_MASK	(DRBG_CTR | DRBG_HMAC | DRBG_HASH)
+/* DRBG strength flags */
+#define DRBG_STRENGTH128	((drbg_flag_t)1<<3)
+#define DRBG_STRENGTH192	((drbg_flag_t)1<<4)
+#define DRBG_STRENGTH256	((drbg_flag_t)1<<5)
+#define DRBG_STRENGTH_MASK	(DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
+				 DRBG_STRENGTH256)
+
+enum drbg_prefixes {
+	DRBG_PREFIX0 = 0x00,
+	DRBG_PREFIX1,
+	DRBG_PREFIX2,
+	DRBG_PREFIX3
+};
+
+#endif /* _DRBG_H */
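
For context, a hypothetical caller of the new wrapper: request 16 random bytes from an already-allocated DRBG handle with caller-supplied additional input (names and sizes illustrative only):

#include <crypto/drbg.h>

static int sketch_drbg_generate(struct crypto_rng *drng)
{
	static const unsigned char info[] = "sketch-additional-input";
	struct drbg_string addtl;
	unsigned char out[16];

	drbg_string_fill(&addtl, info, sizeof(info) - 1);
	return crypto_drbg_get_bytes_addtl(drng, out, sizeof(out), &addtl);
}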
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 26cb1eb..a391955 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -238,10 +238,10 @@
 
 static inline void ahash_request_set_callback(struct ahash_request *req,
 					      u32 flags,
-					      crypto_completion_t complete,
+					      crypto_completion_t compl,
 					      void *data)
 {
-	req->base.complete = complete;
+	req->base.complete = compl;
 	req->base.data = data;
 	req->base.flags = flags;
 }
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 06e8b32..b3a46c5 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -81,8 +81,7 @@
 static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
 	struct crypto_queue *queue)
 {
-	return __crypto_dequeue_request(
-		queue, offsetof(struct skcipher_givcrypt_request, creq.base));
+	return skcipher_givcrypt_cast(crypto_dequeue_request(queue));
 }
 
 static inline void *skcipher_givcrypt_reqctx(
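
The open-coded offsetof() dequeue is replaced with the existing cast helper. A sketch of why the two are equivalent, assuming skcipher_givcrypt_cast() is the usual container_of-style helper defined earlier in this header:

static struct skcipher_givcrypt_request *sketch_cast(
	struct crypto_async_request *req)
{
	/* Same shape as the removed offsetof arithmetic: step from the
	 * embedded base request out to the enclosing structure. */
	return container_of(ablkcipher_request_cast(req),
			    struct skcipher_givcrypt_request, creq);
}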
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 6a626a5..7ef512f 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,12 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void crypto_yield(u32 flags)
-{
-	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
-		cond_resched();
-}
-
 static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
 					struct scatterlist *sg2)
 {
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 25fd612..07d245f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -86,9 +86,9 @@
 
 static inline void skcipher_givcrypt_set_callback(
 	struct skcipher_givcrypt_request *req, u32 flags,
-	crypto_completion_t complete, void *data)
+	crypto_completion_t compl, void *data)
 {
-	ablkcipher_request_set_callback(&req->creq, flags, complete, data);
+	ablkcipher_request_set_callback(&req->creq, flags, compl, data);
 }
 
 static inline void skcipher_givcrypt_set_crypt(
diff --git a/include/dt-bindings/clock/clps711x-clock.h b/include/dt-bindings/clock/clps711x-clock.h
new file mode 100644
index 0000000..0c4c80b
--- /dev/null
+++ b/include/dt-bindings/clock/clps711x-clock.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_CLPS711X_H
+#define __DT_BINDINGS_CLOCK_CLPS711X_H
+
+#define CLPS711X_CLK_DUMMY	0
+#define CLPS711X_CLK_CPU	1
+#define CLPS711X_CLK_BUS	2
+#define CLPS711X_CLK_PLL	3
+#define CLPS711X_CLK_TIMERREF	4
+#define CLPS711X_CLK_TIMER1	5
+#define CLPS711X_CLK_TIMER2	6
+#define CLPS711X_CLK_PWM	7
+#define CLPS711X_CLK_SPIREF	8
+#define CLPS711X_CLK_SPI	9
+#define CLPS711X_CLK_UART	10
+#define CLPS711X_CLK_TICK	11
+#define CLPS711X_CLK_MAX	12
+
+#endif
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 1106ca5..459bd2b 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda@samsung.com>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -34,6 +34,11 @@
 #define CLK_MOUT_CORE		19
 #define CLK_MOUT_APLL		20
 #define CLK_SCLK_HDMIPHY	22
+#define CLK_OUT_DMC		23
+#define CLK_OUT_TOP		24
+#define CLK_OUT_LEFTBUS		25
+#define CLK_OUT_RIGHTBUS	26
+#define CLK_OUT_CPU		27
 
 /* gate for special clocks (sclk) */
 #define CLK_SCLK_FIMC0		128
@@ -230,6 +235,24 @@
 #define CLK_MOUT_G3D		394
 #define CLK_ACLK400_MCUISP	395 /* Exynos4x12 only */
 
+/* gate clocks - ppmu */
+#define CLK_PPMULEFT		400
+#define CLK_PPMURIGHT		401
+#define CLK_PPMUCAMIF		402
+#define CLK_PPMUTV		403
+#define CLK_PPMUMFC_L		404
+#define CLK_PPMUMFC_R		405
+#define CLK_PPMUG3D		406
+#define CLK_PPMUIMAGE		407
+#define CLK_PPMULCD0		408
+#define CLK_PPMULCD1		409 /* Exynos4210 only */
+#define CLK_PPMUFILE		410
+#define CLK_PPMUGPS		411
+#define CLK_PPMUDMC0		412
+#define CLK_PPMUDMC1		413
+#define CLK_PPMUCPU		414
+#define CLK_PPMUACP		415
+
 /* div clocks */
 #define CLK_DIV_ISP0		450 /* Exynos4x12 only */
 #define CLK_DIV_ISP1		451 /* Exynos4x12 only */
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index be6e97c..4273891 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda@samsung.com>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 21d51ae..8dc0913 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda@samsung.com>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/dt-bindings/clock/exynos5440.h b/include/dt-bindings/clock/exynos5440.h
index 70cd850..c66fc40 100644
--- a/include/dt-bindings/clock/exynos5440.h
+++ b/include/dt-bindings/clock/exynos5440.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
new file mode 100644
index 0000000..2c0da56
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_APQ_GCC_8084_H
+#define _DT_BINDINGS_CLK_APQ_GCC_8084_H
+
+#define GPLL0						0
+#define GPLL0_VOTE					1
+#define GPLL1						2
+#define GPLL1_VOTE					3
+#define GPLL2						4
+#define GPLL2_VOTE					5
+#define GPLL3						6
+#define GPLL3_VOTE					7
+#define GPLL4						8
+#define GPLL4_VOTE					9
+#define CONFIG_NOC_CLK_SRC				10
+#define PERIPH_NOC_CLK_SRC				11
+#define SYSTEM_NOC_CLK_SRC				12
+#define BLSP_UART_SIM_CLK_SRC				13
+#define QDSS_TSCTR_CLK_SRC				14
+#define UFS_AXI_CLK_SRC					15
+#define RPM_CLK_SRC					16
+#define KPSS_AHB_CLK_SRC				17
+#define QDSS_AT_CLK_SRC					18
+#define BIMC_DDR_CLK_SRC				19
+#define USB30_MASTER_CLK_SRC				20
+#define USB30_SEC_MASTER_CLK_SRC			21
+#define USB_HSIC_AHB_CLK_SRC				22
+#define MMSS_BIMC_GFX_CLK_SRC				23
+#define QDSS_STM_CLK_SRC				24
+#define ACC_CLK_SRC					25
+#define SEC_CTRL_CLK_SRC				26
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC			27
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC			28
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC			29
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC			30
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC			31
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC			32
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC			33
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC			34
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC			35
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC			36
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC			37
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC			38
+#define BLSP1_UART1_APPS_CLK_SRC			39
+#define BLSP1_UART2_APPS_CLK_SRC			40
+#define BLSP1_UART3_APPS_CLK_SRC			41
+#define BLSP1_UART4_APPS_CLK_SRC			42
+#define BLSP1_UART5_APPS_CLK_SRC			43
+#define BLSP1_UART6_APPS_CLK_SRC			44
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC			45
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC			46
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC			47
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC			48
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC			49
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC			50
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC			51
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC			52
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC			53
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC			54
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC			55
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC			56
+#define BLSP2_UART1_APPS_CLK_SRC			57
+#define BLSP2_UART2_APPS_CLK_SRC			58
+#define BLSP2_UART3_APPS_CLK_SRC			59
+#define BLSP2_UART4_APPS_CLK_SRC			60
+#define BLSP2_UART5_APPS_CLK_SRC			61
+#define BLSP2_UART6_APPS_CLK_SRC			62
+#define CE1_CLK_SRC					63
+#define CE2_CLK_SRC					64
+#define CE3_CLK_SRC					65
+#define GP1_CLK_SRC					66
+#define GP2_CLK_SRC					67
+#define GP3_CLK_SRC					68
+#define PDM2_CLK_SRC					69
+#define QDSS_TRACECLKIN_CLK_SRC				70
+#define RBCPR_CLK_SRC					71
+#define SATA_ASIC0_CLK_SRC				72
+#define SATA_PMALIVE_CLK_SRC				73
+#define SATA_RX_CLK_SRC					74
+#define SATA_RX_OOB_CLK_SRC				75
+#define SDCC1_APPS_CLK_SRC				76
+#define SDCC2_APPS_CLK_SRC				77
+#define SDCC3_APPS_CLK_SRC				78
+#define SDCC4_APPS_CLK_SRC				79
+#define GCC_SNOC_BUS_TIMEOUT0_AHB_CLK			80
+#define SPMI_AHB_CLK_SRC				81
+#define SPMI_SER_CLK_SRC				82
+#define TSIF_REF_CLK_SRC				83
+#define USB30_MOCK_UTMI_CLK_SRC				84
+#define USB30_SEC_MOCK_UTMI_CLK_SRC			85
+#define USB_HS_SYSTEM_CLK_SRC				86
+#define USB_HSIC_CLK_SRC				87
+#define USB_HSIC_IO_CAL_CLK_SRC				88
+#define USB_HSIC_MOCK_UTMI_CLK_SRC			89
+#define USB_HSIC_SYSTEM_CLK_SRC				90
+#define GCC_BAM_DMA_AHB_CLK				91
+#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK		92
+#define DDR_CLK_SRC					93
+#define GCC_BIMC_CFG_AHB_CLK				94
+#define GCC_BIMC_CLK					95
+#define GCC_BIMC_KPSS_AXI_CLK				96
+#define GCC_BIMC_SLEEP_CLK				97
+#define GCC_BIMC_SYSNOC_AXI_CLK				98
+#define GCC_BIMC_XO_CLK					99
+#define GCC_BLSP1_AHB_CLK				100
+#define GCC_BLSP1_SLEEP_CLK				101
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK			102
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK			103
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK			104
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK			105
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK			106
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK			107
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK			108
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK			109
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK			110
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK			111
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK			112
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK			113
+#define GCC_BLSP1_UART1_APPS_CLK			114
+#define GCC_BLSP1_UART1_SIM_CLK				115
+#define GCC_BLSP1_UART2_APPS_CLK			116
+#define GCC_BLSP1_UART2_SIM_CLK				117
+#define GCC_BLSP1_UART3_APPS_CLK			118
+#define GCC_BLSP1_UART3_SIM_CLK				119
+#define GCC_BLSP1_UART4_APPS_CLK			120
+#define GCC_BLSP1_UART4_SIM_CLK				121
+#define GCC_BLSP1_UART5_APPS_CLK			122
+#define GCC_BLSP1_UART5_SIM_CLK				123
+#define GCC_BLSP1_UART6_APPS_CLK			124
+#define GCC_BLSP1_UART6_SIM_CLK				125
+#define GCC_BLSP2_AHB_CLK				126
+#define GCC_BLSP2_SLEEP_CLK				127
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK			128
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK			129
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK			130
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK			131
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK			132
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK			133
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK			134
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK			135
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK			136
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK			137
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK			138
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK			139
+#define GCC_BLSP2_UART1_APPS_CLK			140
+#define GCC_BLSP2_UART1_SIM_CLK				141
+#define GCC_BLSP2_UART2_APPS_CLK			142
+#define GCC_BLSP2_UART2_SIM_CLK				143
+#define GCC_BLSP2_UART3_APPS_CLK			144
+#define GCC_BLSP2_UART3_SIM_CLK				145
+#define GCC_BLSP2_UART4_APPS_CLK			146
+#define GCC_BLSP2_UART4_SIM_CLK				147
+#define GCC_BLSP2_UART5_APPS_CLK			148
+#define GCC_BLSP2_UART5_SIM_CLK				149
+#define GCC_BLSP2_UART6_APPS_CLK			150
+#define GCC_BLSP2_UART6_SIM_CLK				151
+#define GCC_BOOT_ROM_AHB_CLK				152
+#define GCC_CE1_AHB_CLK					153
+#define GCC_CE1_AXI_CLK					154
+#define GCC_CE1_CLK					155
+#define GCC_CE2_AHB_CLK					156
+#define GCC_CE2_AXI_CLK					157
+#define GCC_CE2_CLK					158
+#define GCC_CE3_AHB_CLK					159
+#define GCC_CE3_AXI_CLK					160
+#define GCC_CE3_CLK					161
+#define GCC_CNOC_BUS_TIMEOUT0_AHB_CLK			162
+#define GCC_CNOC_BUS_TIMEOUT1_AHB_CLK			163
+#define GCC_CNOC_BUS_TIMEOUT2_AHB_CLK			164
+#define GCC_CNOC_BUS_TIMEOUT3_AHB_CLK			165
+#define GCC_CNOC_BUS_TIMEOUT4_AHB_CLK			166
+#define GCC_CNOC_BUS_TIMEOUT5_AHB_CLK			167
+#define GCC_CNOC_BUS_TIMEOUT6_AHB_CLK			168
+#define GCC_CNOC_BUS_TIMEOUT7_AHB_CLK			169
+#define GCC_CFG_NOC_AHB_CLK				170
+#define GCC_CFG_NOC_DDR_CFG_CLK				171
+#define GCC_CFG_NOC_RPM_AHB_CLK				172
+#define GCC_COPSS_SMMU_AHB_CLK				173
+#define GCC_COPSS_SMMU_AXI_CLK				174
+#define GCC_DCD_XO_CLK					175
+#define GCC_BIMC_DDR_CH0_CLK				176
+#define GCC_BIMC_DDR_CH1_CLK				177
+#define GCC_BIMC_DDR_CPLL0_CLK				178
+#define GCC_BIMC_DDR_CPLL1_CLK				179
+#define GCC_BIMC_GFX_CLK				180
+#define GCC_DDR_DIM_CFG_CLK				181
+#define GCC_DDR_DIM_SLEEP_CLK				182
+#define GCC_DEHR_CLK					183
+#define GCC_AHB_CLK					184
+#define GCC_IM_SLEEP_CLK				185
+#define GCC_XO_CLK					186
+#define GCC_XO_DIV4_CLK					187
+#define GCC_GP1_CLK					188
+#define GCC_GP2_CLK					189
+#define GCC_GP3_CLK					190
+#define GCC_IMEM_AXI_CLK				191
+#define GCC_IMEM_CFG_AHB_CLK				192
+#define GCC_KPSS_AHB_CLK				193
+#define GCC_KPSS_AXI_CLK				194
+#define GCC_LPASS_MPORT_AXI_CLK				195
+#define GCC_LPASS_Q6_AXI_CLK				196
+#define GCC_LPASS_SWAY_CLK				197
+#define GCC_MMSS_BIMC_GFX_CLK				198
+#define GCC_MMSS_NOC_AT_CLK				199
+#define GCC_MMSS_NOC_CFG_AHB_CLK			200
+#define GCC_MMSS_VPU_MAPLE_SYS_NOC_AXI_CLK		201
+#define GCC_OCMEM_NOC_CFG_AHB_CLK			202
+#define GCC_OCMEM_SYS_NOC_AXI_CLK			203
+#define GCC_MPM_AHB_CLK					204
+#define GCC_MSG_RAM_AHB_CLK				205
+#define GCC_NOC_CONF_XPU_AHB_CLK			206
+#define GCC_PDM2_CLK					207
+#define GCC_PDM_AHB_CLK					208
+#define GCC_PDM_XO4_CLK					209
+#define GCC_PERIPH_NOC_AHB_CLK				210
+#define GCC_PERIPH_NOC_AT_CLK				211
+#define GCC_PERIPH_NOC_CFG_AHB_CLK			212
+#define GCC_PERIPH_NOC_USB_HSIC_AHB_CLK			213
+#define GCC_PERIPH_NOC_MPU_CFG_AHB_CLK			214
+#define GCC_PERIPH_XPU_AHB_CLK				215
+#define GCC_PNOC_BUS_TIMEOUT0_AHB_CLK			216
+#define GCC_PNOC_BUS_TIMEOUT1_AHB_CLK			217
+#define GCC_PNOC_BUS_TIMEOUT2_AHB_CLK			218
+#define GCC_PNOC_BUS_TIMEOUT3_AHB_CLK			219
+#define GCC_PNOC_BUS_TIMEOUT4_AHB_CLK			220
+#define GCC_PRNG_AHB_CLK				221
+#define GCC_QDSS_AT_CLK					222
+#define GCC_QDSS_CFG_AHB_CLK				223
+#define GCC_QDSS_DAP_AHB_CLK				224
+#define GCC_QDSS_DAP_CLK				225
+#define GCC_QDSS_ETR_USB_CLK				226
+#define GCC_QDSS_STM_CLK				227
+#define GCC_QDSS_TRACECLKIN_CLK				228
+#define GCC_QDSS_TSCTR_DIV16_CLK			229
+#define GCC_QDSS_TSCTR_DIV2_CLK				230
+#define GCC_QDSS_TSCTR_DIV3_CLK				231
+#define GCC_QDSS_TSCTR_DIV4_CLK				232
+#define GCC_QDSS_TSCTR_DIV8_CLK				233
+#define GCC_QDSS_RBCPR_XPU_AHB_CLK			234
+#define GCC_RBCPR_AHB_CLK				235
+#define GCC_RBCPR_CLK					236
+#define GCC_RPM_BUS_AHB_CLK				237
+#define GCC_RPM_PROC_HCLK				238
+#define GCC_RPM_SLEEP_CLK				239
+#define GCC_RPM_TIMER_CLK				240
+#define GCC_SATA_ASIC0_CLK				241
+#define GCC_SATA_AXI_CLK				242
+#define GCC_SATA_CFG_AHB_CLK				243
+#define GCC_SATA_PMALIVE_CLK				244
+#define GCC_SATA_RX_CLK					245
+#define GCC_SATA_RX_OOB_CLK				246
+#define GCC_SDCC1_AHB_CLK				247
+#define GCC_SDCC1_APPS_CLK				248
+#define GCC_SDCC1_CDCCAL_FF_CLK				249
+#define GCC_SDCC1_CDCCAL_SLEEP_CLK			250
+#define GCC_SDCC2_AHB_CLK				251
+#define GCC_SDCC2_APPS_CLK				252
+#define GCC_SDCC2_INACTIVITY_TIMERS_CLK			253
+#define GCC_SDCC3_AHB_CLK				254
+#define GCC_SDCC3_APPS_CLK				255
+#define GCC_SDCC3_INACTIVITY_TIMERS_CLK			256
+#define GCC_SDCC4_AHB_CLK				257
+#define GCC_SDCC4_APPS_CLK				258
+#define GCC_SDCC4_INACTIVITY_TIMERS_CLK			259
+#define GCC_SEC_CTRL_ACC_CLK				260
+#define GCC_SEC_CTRL_AHB_CLK				261
+#define GCC_SEC_CTRL_BOOT_ROM_PATCH_CLK			262
+#define GCC_SEC_CTRL_CLK				263
+#define GCC_SEC_CTRL_SENSE_CLK				264
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK			265
+#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK			266
+#define GCC_SPDM_BIMC_CY_CLK				267
+#define GCC_SPDM_CFG_AHB_CLK				268
+#define GCC_SPDM_DEBUG_CY_CLK				269
+#define GCC_SPDM_FF_CLK					270
+#define GCC_SPDM_MSTR_AHB_CLK				271
+#define GCC_SPDM_PNOC_CY_CLK				272
+#define GCC_SPDM_RPM_CY_CLK				273
+#define GCC_SPDM_SNOC_CY_CLK				274
+#define GCC_SPMI_AHB_CLK				275
+#define GCC_SPMI_CNOC_AHB_CLK				276
+#define GCC_SPMI_SER_CLK				277
+#define GCC_SPSS_AHB_CLK				278
+#define GCC_SNOC_CNOC_AHB_CLK				279
+#define GCC_SNOC_PNOC_AHB_CLK				280
+#define GCC_SYS_NOC_AT_CLK				281
+#define GCC_SYS_NOC_AXI_CLK				282
+#define GCC_SYS_NOC_KPSS_AHB_CLK			283
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK			284
+#define GCC_SYS_NOC_UFS_AXI_CLK				285
+#define GCC_SYS_NOC_USB3_AXI_CLK			286
+#define GCC_SYS_NOC_USB3_SEC_AXI_CLK			287
+#define GCC_TCSR_AHB_CLK				288
+#define GCC_TLMM_AHB_CLK				289
+#define GCC_TLMM_CLK					290
+#define GCC_TSIF_AHB_CLK				291
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK			292
+#define GCC_TSIF_REF_CLK				293
+#define GCC_UFS_AHB_CLK					294
+#define GCC_UFS_AXI_CLK					295
+#define GCC_UFS_RX_CFG_CLK				296
+#define GCC_UFS_RX_SYMBOL_0_CLK				297
+#define GCC_UFS_RX_SYMBOL_1_CLK				298
+#define GCC_UFS_TX_CFG_CLK				299
+#define GCC_UFS_TX_SYMBOL_0_CLK				300
+#define GCC_UFS_TX_SYMBOL_1_CLK				301
+#define GCC_USB2A_PHY_SLEEP_CLK				302
+#define GCC_USB2B_PHY_SLEEP_CLK				303
+#define GCC_USB30_MASTER_CLK				304
+#define GCC_USB30_MOCK_UTMI_CLK				305
+#define GCC_USB30_SLEEP_CLK				306
+#define GCC_USB30_SEC_MASTER_CLK			307
+#define GCC_USB30_SEC_MOCK_UTMI_CLK			308
+#define GCC_USB30_SEC_SLEEP_CLK				309
+#define GCC_USB_HS_AHB_CLK				310
+#define GCC_USB_HS_INACTIVITY_TIMERS_CLK		311
+#define GCC_USB_HS_SYSTEM_CLK				312
+#define GCC_USB_HSIC_AHB_CLK				313
+#define GCC_USB_HSIC_CLK				314
+#define GCC_USB_HSIC_IO_CAL_CLK				315
+#define GCC_USB_HSIC_IO_CAL_SLEEP_CLK			316
+#define GCC_USB_HSIC_MOCK_UTMI_CLK			317
+#define GCC_USB_HSIC_SYSTEM_CLK				318
+#define PCIE_0_AUX_CLK_SRC				319
+#define PCIE_0_PIPE_CLK_SRC				320
+#define PCIE_1_AUX_CLK_SRC				321
+#define PCIE_1_PIPE_CLK_SRC				322
+#define GCC_PCIE_0_AUX_CLK				323
+#define GCC_PCIE_0_CFG_AHB_CLK				324
+#define GCC_PCIE_0_MSTR_AXI_CLK				325
+#define GCC_PCIE_0_PIPE_CLK				326
+#define GCC_PCIE_0_SLV_AXI_CLK				327
+#define GCC_PCIE_1_AUX_CLK				328
+#define GCC_PCIE_1_CFG_AHB_CLK				329
+#define GCC_PCIE_1_MSTR_AXI_CLK				330
+#define GCC_PCIE_1_PIPE_CLK				331
+#define GCC_PCIE_1_SLV_AXI_CLK				332
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
new file mode 100644
index 0000000..b857cad
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_GCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_GCC_IPQ806X_H
+
+#define AFAB_CLK_SRC				0
+#define QDSS_STM_CLK				1
+#define SCSS_A_CLK				2
+#define SCSS_H_CLK				3
+#define AFAB_CORE_CLK				4
+#define SCSS_XO_SRC_CLK				5
+#define AFAB_EBI1_CH0_A_CLK			6
+#define AFAB_EBI1_CH1_A_CLK			7
+#define AFAB_AXI_S0_FCLK			8
+#define AFAB_AXI_S1_FCLK			9
+#define AFAB_AXI_S2_FCLK			10
+#define AFAB_AXI_S3_FCLK			11
+#define AFAB_AXI_S4_FCLK			12
+#define SFAB_CORE_CLK				13
+#define SFAB_AXI_S0_FCLK			14
+#define SFAB_AXI_S1_FCLK			15
+#define SFAB_AXI_S2_FCLK			16
+#define SFAB_AXI_S3_FCLK			17
+#define SFAB_AXI_S4_FCLK			18
+#define SFAB_AXI_S5_FCLK			19
+#define SFAB_AHB_S0_FCLK			20
+#define SFAB_AHB_S1_FCLK			21
+#define SFAB_AHB_S2_FCLK			22
+#define SFAB_AHB_S3_FCLK			23
+#define SFAB_AHB_S4_FCLK			24
+#define SFAB_AHB_S5_FCLK			25
+#define SFAB_AHB_S6_FCLK			26
+#define SFAB_AHB_S7_FCLK			27
+#define QDSS_AT_CLK_SRC				28
+#define QDSS_AT_CLK				29
+#define QDSS_TRACECLKIN_CLK_SRC			30
+#define QDSS_TRACECLKIN_CLK			31
+#define QDSS_TSCTR_CLK_SRC			32
+#define QDSS_TSCTR_CLK				33
+#define SFAB_ADM0_M0_A_CLK			34
+#define SFAB_ADM0_M1_A_CLK			35
+#define SFAB_ADM0_M2_H_CLK			36
+#define ADM0_CLK				37
+#define ADM0_PBUS_CLK				38
+#define IMEM0_A_CLK				39
+#define QDSS_H_CLK				40
+#define PCIE_A_CLK				41
+#define PCIE_AUX_CLK				42
+#define PCIE_H_CLK				43
+#define PCIE_PHY_CLK				44
+#define SFAB_CLK_SRC				45
+#define SFAB_LPASS_Q6_A_CLK			46
+#define SFAB_AFAB_M_A_CLK			47
+#define AFAB_SFAB_M0_A_CLK			48
+#define AFAB_SFAB_M1_A_CLK			49
+#define SFAB_SATA_S_H_CLK			50
+#define DFAB_CLK_SRC				51
+#define DFAB_CLK				52
+#define SFAB_DFAB_M_A_CLK			53
+#define DFAB_SFAB_M_A_CLK			54
+#define DFAB_SWAY0_H_CLK			55
+#define DFAB_SWAY1_H_CLK			56
+#define DFAB_ARB0_H_CLK				57
+#define DFAB_ARB1_H_CLK				58
+#define PPSS_H_CLK				59
+#define PPSS_PROC_CLK				60
+#define PPSS_TIMER0_CLK				61
+#define PPSS_TIMER1_CLK				62
+#define PMEM_A_CLK				63
+#define DMA_BAM_H_CLK				64
+#define SIC_H_CLK				65
+#define SPS_TIC_H_CLK				66
+#define CFPB_2X_CLK_SRC				67
+#define CFPB_CLK				68
+#define CFPB0_H_CLK				69
+#define CFPB1_H_CLK				70
+#define CFPB2_H_CLK				71
+#define SFAB_CFPB_M_H_CLK			72
+#define CFPB_MASTER_H_CLK			73
+#define SFAB_CFPB_S_H_CLK			74
+#define CFPB_SPLITTER_H_CLK			75
+#define TSIF_H_CLK				76
+#define TSIF_INACTIVITY_TIMERS_CLK		77
+#define TSIF_REF_SRC				78
+#define TSIF_REF_CLK				79
+#define CE1_H_CLK				80
+#define CE1_CORE_CLK				81
+#define CE1_SLEEP_CLK				82
+#define CE2_H_CLK				83
+#define CE2_CORE_CLK				84
+#define SFPB_H_CLK_SRC				85
+#define SFPB_H_CLK				86
+#define SFAB_SFPB_M_H_CLK			87
+#define SFAB_SFPB_S_H_CLK			88
+#define RPM_PROC_CLK				89
+#define RPM_BUS_H_CLK				90
+#define RPM_SLEEP_CLK				91
+#define RPM_TIMER_CLK				92
+#define RPM_MSG_RAM_H_CLK			93
+#define PMIC_ARB0_H_CLK				94
+#define PMIC_ARB1_H_CLK				95
+#define PMIC_SSBI2_SRC				96
+#define PMIC_SSBI2_CLK				97
+#define SDC1_H_CLK				98
+#define SDC2_H_CLK				99
+#define SDC3_H_CLK				100
+#define SDC4_H_CLK				101
+#define SDC1_SRC				102
+#define SDC1_CLK				103
+#define SDC2_SRC				104
+#define SDC2_CLK				105
+#define SDC3_SRC				106
+#define SDC3_CLK				107
+#define SDC4_SRC				108
+#define SDC4_CLK				109
+#define USB_HS1_H_CLK				110
+#define USB_HS1_XCVR_SRC			111
+#define USB_HS1_XCVR_CLK			112
+#define USB_HSIC_H_CLK				113
+#define USB_HSIC_XCVR_SRC			114
+#define USB_HSIC_XCVR_CLK			115
+#define USB_HSIC_SYSTEM_CLK_SRC			116
+#define USB_HSIC_SYSTEM_CLK			117
+#define CFPB0_C0_H_CLK				118
+#define CFPB0_D0_H_CLK				119
+#define CFPB0_C1_H_CLK				120
+#define CFPB0_D1_H_CLK				121
+#define USB_FS1_H_CLK				122
+#define USB_FS1_XCVR_SRC			123
+#define USB_FS1_XCVR_CLK			124
+#define USB_FS1_SYSTEM_CLK			125
+#define GSBI_COMMON_SIM_SRC			126
+#define GSBI1_H_CLK				127
+#define GSBI2_H_CLK				128
+#define GSBI3_H_CLK				129
+#define GSBI4_H_CLK				130
+#define GSBI5_H_CLK				131
+#define GSBI6_H_CLK				132
+#define GSBI7_H_CLK				133
+#define GSBI1_QUP_SRC				134
+#define GSBI1_QUP_CLK				135
+#define GSBI2_QUP_SRC				136
+#define GSBI2_QUP_CLK				137
+#define GSBI3_QUP_SRC				138
+#define GSBI3_QUP_CLK				139
+#define GSBI4_QUP_SRC				140
+#define GSBI4_QUP_CLK				141
+#define GSBI5_QUP_SRC				142
+#define GSBI5_QUP_CLK				143
+#define GSBI6_QUP_SRC				144
+#define GSBI6_QUP_CLK				145
+#define GSBI7_QUP_SRC				146
+#define GSBI7_QUP_CLK				147
+#define GSBI1_UART_SRC				148
+#define GSBI1_UART_CLK				149
+#define GSBI2_UART_SRC				150
+#define GSBI2_UART_CLK				151
+#define GSBI3_UART_SRC				152
+#define GSBI3_UART_CLK				153
+#define GSBI4_UART_SRC				154
+#define GSBI4_UART_CLK				155
+#define GSBI5_UART_SRC				156
+#define GSBI5_UART_CLK				157
+#define GSBI6_UART_SRC				158
+#define GSBI6_UART_CLK				159
+#define GSBI7_UART_SRC				160
+#define GSBI7_UART_CLK				161
+#define GSBI1_SIM_CLK				162
+#define GSBI2_SIM_CLK				163
+#define GSBI3_SIM_CLK				164
+#define GSBI4_SIM_CLK				165
+#define GSBI5_SIM_CLK				166
+#define GSBI6_SIM_CLK				167
+#define GSBI7_SIM_CLK				168
+#define USB_HSIC_HSIC_CLK_SRC			169
+#define USB_HSIC_HSIC_CLK			170
+#define USB_HSIC_HSIO_CAL_CLK			171
+#define SPDM_CFG_H_CLK				172
+#define SPDM_MSTR_H_CLK				173
+#define SPDM_FF_CLK_SRC				174
+#define SPDM_FF_CLK				175
+#define SEC_CTRL_CLK				176
+#define SEC_CTRL_ACC_CLK_SRC			177
+#define SEC_CTRL_ACC_CLK			178
+#define TLMM_H_CLK				179
+#define TLMM_CLK				180
+#define SATA_H_CLK				181
+#define SATA_CLK_SRC				182
+#define SATA_RXOOB_CLK				183
+#define SATA_PMALIVE_CLK			184
+#define SATA_PHY_REF_CLK			185
+#define SATA_A_CLK				186
+#define SATA_PHY_CFG_CLK			187
+#define TSSC_CLK_SRC				188
+#define TSSC_CLK				189
+#define PDM_SRC					190
+#define PDM_CLK					191
+#define GP0_SRC					192
+#define GP0_CLK					193
+#define GP1_SRC					194
+#define GP1_CLK					195
+#define GP2_SRC					196
+#define GP2_CLK					197
+#define MPM_CLK					198
+#define EBI1_CLK_SRC				199
+#define EBI1_CH0_CLK				200
+#define EBI1_CH1_CLK				201
+#define EBI1_2X_CLK				202
+#define EBI1_CH0_DQ_CLK				203
+#define EBI1_CH1_DQ_CLK				204
+#define EBI1_CH0_CA_CLK				205
+#define EBI1_CH1_CA_CLK				206
+#define EBI1_XO_CLK				207
+#define SFAB_SMPSS_S_H_CLK			208
+#define PRNG_SRC				209
+#define PRNG_CLK				210
+#define PXO_SRC					211
+#define SPDM_CY_PORT0_CLK			212
+#define SPDM_CY_PORT1_CLK			213
+#define SPDM_CY_PORT2_CLK			214
+#define SPDM_CY_PORT3_CLK			215
+#define SPDM_CY_PORT4_CLK			216
+#define SPDM_CY_PORT5_CLK			217
+#define SPDM_CY_PORT6_CLK			218
+#define SPDM_CY_PORT7_CLK			219
+#define PLL0					220
+#define PLL0_VOTE				221
+#define PLL3					222
+#define PLL3_VOTE				223
+#define PLL4					224
+#define PLL4_VOTE				225
+#define PLL8					226
+#define PLL8_VOTE				227
+#define PLL9					228
+#define PLL10					229
+#define PLL11					230
+#define PLL12					231
+#define PLL14					232
+#define PLL14_VOTE				233
+#define PLL18					234
+#define CE5_SRC					235
+#define CE5_H_CLK				236
+#define CE5_CORE_CLK				237
+#define CE3_SLEEP_CLK				238
+#define SFAB_AHB_S8_FCLK			239
+#define SPDM_CY_PORT8_CLK			246
+#define PCIE_ALT_REF_SRC			247
+#define PCIE_ALT_REF_CLK			248
+#define PCIE_1_A_CLK				249
+#define PCIE_1_AUX_CLK				250
+#define PCIE_1_H_CLK				251
+#define PCIE_1_PHY_CLK				252
+#define PCIE_1_ALT_REF_SRC			253
+#define PCIE_1_ALT_REF_CLK			254
+#define PCIE_2_A_CLK				255
+#define PCIE_2_AUX_CLK				256
+#define PCIE_2_H_CLK				257
+#define PCIE_2_PHY_CLK				258
+#define PCIE_2_ALT_REF_SRC			259
+#define PCIE_2_ALT_REF_CLK			260
+#define EBI2_CLK				261
+#define USB30_SLEEP_CLK				262
+#define USB30_UTMI_SRC				263
+#define USB30_0_UTMI_CLK			264
+#define USB30_1_UTMI_CLK			265
+#define USB30_MASTER_SRC			266
+#define USB30_0_MASTER_CLK			267
+#define USB30_1_MASTER_CLK			268
+#define GMAC_CORE1_CLK_SRC			269
+#define GMAC_CORE2_CLK_SRC			270
+#define GMAC_CORE3_CLK_SRC			271
+#define GMAC_CORE4_CLK_SRC			272
+#define GMAC_CORE1_CLK				273
+#define GMAC_CORE2_CLK				274
+#define GMAC_CORE3_CLK				275
+#define GMAC_CORE4_CLK				276
+#define UBI32_CORE1_CLK_SRC			277
+#define UBI32_CORE2_CLK_SRC			278
+#define UBI32_CORE1_CLK				279
+#define UBI32_CORE2_CLK				280
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h
index f9f5471..7d20eed 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h
@@ -308,5 +308,16 @@
 #define PLL13					292
 #define PLL14					293
 #define PLL14_VOTE				294
+#define USB_HS3_H_CLK				295
+#define USB_HS3_XCVR_SRC			296
+#define USB_HS3_XCVR_CLK			297
+#define USB_HS4_H_CLK				298
+#define USB_HS4_XCVR_SRC			299
+#define USB_HS4_XCVR_CLK			300
+#define SATA_PHY_CFG_CLK			301
+#define SATA_A_CLK				302
+#define CE3_SRC					303
+#define CE3_CORE_CLK				304
+#define CE3_H_CLK				305
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
new file mode 100644
index 0000000..a929f86
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_APQ_MMCC_8084_H
+#define _DT_BINDINGS_CLK_APQ_MMCC_8084_H
+
+#define MMSS_AHB_CLK_SRC		0
+#define MMSS_AXI_CLK_SRC		1
+#define MMPLL0				2
+#define MMPLL0_VOTE			3
+#define MMPLL1				4
+#define MMPLL1_VOTE			5
+#define MMPLL2				6
+#define MMPLL3				7
+#define MMPLL4				8
+#define CSI0_CLK_SRC			9
+#define CSI1_CLK_SRC			10
+#define CSI2_CLK_SRC			11
+#define CSI3_CLK_SRC			12
+#define VCODEC0_CLK_SRC			13
+#define VFE0_CLK_SRC			14
+#define VFE1_CLK_SRC			15
+#define MDP_CLK_SRC			16
+#define PCLK0_CLK_SRC			17
+#define PCLK1_CLK_SRC			18
+#define OCMEMNOC_CLK_SRC		19
+#define GFX3D_CLK_SRC			20
+#define JPEG0_CLK_SRC			21
+#define JPEG1_CLK_SRC			22
+#define JPEG2_CLK_SRC			23
+#define EDPPIXEL_CLK_SRC		24
+#define EXTPCLK_CLK_SRC			25
+#define VP_CLK_SRC			26
+#define CCI_CLK_SRC			27
+#define CAMSS_GP0_CLK_SRC		28
+#define CAMSS_GP1_CLK_SRC		29
+#define MCLK0_CLK_SRC			30
+#define MCLK1_CLK_SRC			31
+#define MCLK2_CLK_SRC			32
+#define MCLK3_CLK_SRC			33
+#define CSI0PHYTIMER_CLK_SRC		34
+#define CSI1PHYTIMER_CLK_SRC		35
+#define CSI2PHYTIMER_CLK_SRC		36
+#define CPP_CLK_SRC			37
+#define BYTE0_CLK_SRC			38
+#define BYTE1_CLK_SRC			39
+#define EDPAUX_CLK_SRC			40
+#define EDPLINK_CLK_SRC			41
+#define ESC0_CLK_SRC			42
+#define ESC1_CLK_SRC			43
+#define HDMI_CLK_SRC			44
+#define VSYNC_CLK_SRC			45
+#define RBCPR_CLK_SRC			46
+#define RBBMTIMER_CLK_SRC		47
+#define MAPLE_CLK_SRC			48
+#define VDP_CLK_SRC			49
+#define VPU_BUS_CLK_SRC			50
+#define MMSS_CXO_CLK			51
+#define MMSS_SLEEPCLK_CLK		52
+#define AVSYNC_AHB_CLK			53
+#define AVSYNC_EDPPIXEL_CLK		54
+#define AVSYNC_EXTPCLK_CLK		55
+#define AVSYNC_PCLK0_CLK		56
+#define AVSYNC_PCLK1_CLK		57
+#define AVSYNC_VP_CLK			58
+#define CAMSS_AHB_CLK			59
+#define CAMSS_CCI_CCI_AHB_CLK		60
+#define CAMSS_CCI_CCI_CLK		61
+#define CAMSS_CSI0_AHB_CLK		62
+#define CAMSS_CSI0_CLK			63
+#define CAMSS_CSI0PHY_CLK		64
+#define CAMSS_CSI0PIX_CLK		65
+#define CAMSS_CSI0RDI_CLK		66
+#define CAMSS_CSI1_AHB_CLK		67
+#define CAMSS_CSI1_CLK			68
+#define CAMSS_CSI1PHY_CLK		69
+#define CAMSS_CSI1PIX_CLK		70
+#define CAMSS_CSI1RDI_CLK		71
+#define CAMSS_CSI2_AHB_CLK		72
+#define CAMSS_CSI2_CLK			73
+#define CAMSS_CSI2PHY_CLK		74
+#define CAMSS_CSI2PIX_CLK		75
+#define CAMSS_CSI2RDI_CLK		76
+#define CAMSS_CSI3_AHB_CLK		77
+#define CAMSS_CSI3_CLK			78
+#define CAMSS_CSI3PHY_CLK		79
+#define CAMSS_CSI3PIX_CLK		80
+#define CAMSS_CSI3RDI_CLK		81
+#define CAMSS_CSI_VFE0_CLK		82
+#define CAMSS_CSI_VFE1_CLK		83
+#define CAMSS_GP0_CLK			84
+#define CAMSS_GP1_CLK			85
+#define CAMSS_ISPIF_AHB_CLK		86
+#define CAMSS_JPEG_JPEG0_CLK		87
+#define CAMSS_JPEG_JPEG1_CLK		88
+#define CAMSS_JPEG_JPEG2_CLK		89
+#define CAMSS_JPEG_JPEG_AHB_CLK		90
+#define CAMSS_JPEG_JPEG_AXI_CLK		91
+#define CAMSS_MCLK0_CLK			92
+#define CAMSS_MCLK1_CLK			93
+#define CAMSS_MCLK2_CLK			94
+#define CAMSS_MCLK3_CLK			95
+#define CAMSS_MICRO_AHB_CLK		96
+#define CAMSS_PHY0_CSI0PHYTIMER_CLK	97
+#define CAMSS_PHY1_CSI1PHYTIMER_CLK	98
+#define CAMSS_PHY2_CSI2PHYTIMER_CLK	99
+#define CAMSS_TOP_AHB_CLK		100
+#define CAMSS_VFE_CPP_AHB_CLK		101
+#define CAMSS_VFE_CPP_CLK		102
+#define CAMSS_VFE_VFE0_CLK		103
+#define CAMSS_VFE_VFE1_CLK		104
+#define CAMSS_VFE_VFE_AHB_CLK		105
+#define CAMSS_VFE_VFE_AXI_CLK		106
+#define MDSS_AHB_CLK			107
+#define MDSS_AXI_CLK			108
+#define MDSS_BYTE0_CLK			109
+#define MDSS_BYTE1_CLK			110
+#define MDSS_EDPAUX_CLK			111
+#define MDSS_EDPLINK_CLK		112
+#define MDSS_EDPPIXEL_CLK		113
+#define MDSS_ESC0_CLK			114
+#define MDSS_ESC1_CLK			115
+#define MDSS_EXTPCLK_CLK		116
+#define MDSS_HDMI_AHB_CLK		117
+#define MDSS_HDMI_CLK			118
+#define MDSS_MDP_CLK			119
+#define MDSS_MDP_LUT_CLK		120
+#define MDSS_PCLK0_CLK			121
+#define MDSS_PCLK1_CLK			122
+#define MDSS_VSYNC_CLK			123
+#define MMSS_RBCPR_AHB_CLK		124
+#define MMSS_RBCPR_CLK			125
+#define MMSS_SPDM_AHB_CLK		126
+#define MMSS_SPDM_AXI_CLK		127
+#define MMSS_SPDM_CSI0_CLK		128
+#define MMSS_SPDM_GFX3D_CLK		129
+#define MMSS_SPDM_JPEG0_CLK		130
+#define MMSS_SPDM_JPEG1_CLK		131
+#define MMSS_SPDM_JPEG2_CLK		132
+#define MMSS_SPDM_MDP_CLK		133
+#define MMSS_SPDM_PCLK0_CLK		134
+#define MMSS_SPDM_PCLK1_CLK		135
+#define MMSS_SPDM_VCODEC0_CLK		136
+#define MMSS_SPDM_VFE0_CLK		137
+#define MMSS_SPDM_VFE1_CLK		138
+#define MMSS_SPDM_RM_AXI_CLK		139
+#define MMSS_SPDM_RM_OCMEMNOC_CLK	140
+#define MMSS_MISC_AHB_CLK		141
+#define MMSS_MMSSNOC_AHB_CLK		142
+#define MMSS_MMSSNOC_BTO_AHB_CLK	143
+#define MMSS_MMSSNOC_AXI_CLK		144
+#define MMSS_S0_AXI_CLK			145
+#define OCMEMCX_AHB_CLK			146
+#define OCMEMCX_OCMEMNOC_CLK		147
+#define OXILI_OCMEMGX_CLK		148
+#define OXILI_GFX3D_CLK			149
+#define OXILI_RBBMTIMER_CLK		150
+#define OXILICX_AHB_CLK			151
+#define VENUS0_AHB_CLK			152
+#define VENUS0_AXI_CLK			153
+#define VENUS0_CORE0_VCODEC_CLK		154
+#define VENUS0_CORE1_VCODEC_CLK		155
+#define VENUS0_OCMEMNOC_CLK		156
+#define VENUS0_VCODEC0_CLK		157
+#define VPU_AHB_CLK			158
+#define VPU_AXI_CLK			159
+#define VPU_BUS_CLK			160
+#define VPU_CXO_CLK			161
+#define VPU_MAPLE_CLK			162
+#define VPU_SLEEP_CLK			163
+#define VPU_VDP_CLK			164
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h
index 5868ef1..85041b2 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h
@@ -133,5 +133,13 @@
 #define CSIPHY0_TIMER_CLK				116
 #define PLL1						117
 #define PLL2						118
+#define RGB_TV_CLK					119
+#define NPL_TV_CLK					120
+#define VCAP_AHB_CLK					121
+#define VCAP_AXI_CLK					122
+#define VCAP_SRC					123
+#define VCAP_CLK					124
+#define VCAP_NPL_CLK					125
+#define PLL15						126
 
 #endif
diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h
new file mode 100644
index 0000000..bc1ed1d
--- /dev/null
+++ b/include/dt-bindings/clock/rk3066a-cru.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/rk3188-cru-common.h>
+
+/* soft-reset indices */
+#define SRST_SRST1		0
+#define SRST_SRST2		1
+
+#define SRST_L2MEM		18
+#define SRST_I2S0		23
+#define SRST_I2S1		24
+#define SRST_I2S2		25
+#define SRST_TIMER2		29
+
+#define SRST_GPIO4		36
+#define SRST_GPIO6		38
+
+#define SRST_TSADC		92
+
+#define SRST_HDMI		96
+#define SRST_HDMI_APB		97
+#define SRST_CIF1		111
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
new file mode 100644
index 0000000..750ee60
--- /dev/null
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* core clocks */
+#define PLL_APLL		1
+#define PLL_DPLL		2
+#define PLL_CPLL		3
+#define PLL_GPLL		4
+#define CORE_PERI		5
+#define CORE_L2C		6
+
+/* sclk gates (special clocks) */
+#define SCLK_UART0		64
+#define SCLK_UART1		65
+#define SCLK_UART2		66
+#define SCLK_UART3		67
+#define SCLK_MAC		68
+#define SCLK_SPI0		69
+#define SCLK_SPI1		70
+#define SCLK_SARADC		71
+#define SCLK_SDMMC		72
+#define SCLK_SDIO		73
+#define SCLK_EMMC		74
+#define SCLK_I2S0		75
+#define SCLK_I2S1		76
+#define SCLK_I2S2		77
+#define SCLK_SPDIF		78
+#define SCLK_CIF0		79
+#define SCLK_CIF1		80
+#define SCLK_OTGPHY0		81
+#define SCLK_OTGPHY1		82
+#define SCLK_HSADC		83
+#define SCLK_TIMER0		84
+#define SCLK_TIMER1		85
+#define SCLK_TIMER2		86
+#define SCLK_TIMER3		87
+#define SCLK_TIMER4		88
+#define SCLK_TIMER5		89
+#define SCLK_TIMER6		90
+#define SCLK_JTAG		91
+#define SCLK_SMC		92
+
+#define DCLK_LCDC0		190
+#define DCLK_LCDC1		191
+
+/* aclk gates */
+#define ACLK_DMA1		192
+#define ACLK_DMA2		193
+#define ACLK_GPS		194
+#define ACLK_LCDC0		195
+#define ACLK_LCDC1		196
+#define ACLK_GPU		197
+#define ACLK_SMC		198
+#define ACLK_CIF		199
+#define ACLK_IPP		200
+#define ACLK_RGA		201
+#define ACLK_CIF0		202
+
+/* pclk gates */
+#define PCLK_GRF		320
+#define PCLK_PMU		321
+#define PCLK_TIMER0		322
+#define PCLK_TIMER1		323
+#define PCLK_TIMER2		324
+#define PCLK_TIMER3		325
+#define PCLK_PWM01		326
+#define PCLK_PWM23		327
+#define PCLK_SPI0		328
+#define PCLK_SPI1		329
+#define PCLK_SARADC		330
+#define PCLK_WDT		331
+#define PCLK_UART0		332
+#define PCLK_UART1		333
+#define PCLK_UART2		334
+#define PCLK_UART3		335
+#define PCLK_I2C0		336
+#define PCLK_I2C1		337
+#define PCLK_I2C2		338
+#define PCLK_I2C3		339
+#define PCLK_I2C4		340
+#define PCLK_GPIO0		341
+#define PCLK_GPIO1		342
+#define PCLK_GPIO2		343
+#define PCLK_GPIO3		344
+#define PCLK_GPIO4		345
+#define PCLK_GPIO6		346
+#define PCLK_EFUSE		347
+#define PCLK_TZPC		348
+#define PCLK_TSADC		349
+
+/* hclk gates */
+#define HCLK_SDMMC		448
+#define HCLK_SDIO		449
+#define HCLK_EMMC		450
+#define HCLK_OTG0		451
+#define HCLK_EMAC		452
+#define HCLK_SPDIF		453
+#define HCLK_I2S0		454
+#define HCLK_I2S1		455
+#define HCLK_I2S2		456
+#define HCLK_OTG1		457
+#define HCLK_HSIC		458
+#define HCLK_HSADC		459
+#define HCLK_PIDF		460
+#define HCLK_LCDC0		461
+#define HCLK_LCDC1		462
+#define HCLK_ROM		463
+#define HCLK_CIF0		464
+#define HCLK_IPP		465
+#define HCLK_RGA		466
+#define HCLK_NANDC0		467
+
+#define CLK_NR_CLKS		(HCLK_NANDC0 + 1)
+
+/* soft-reset indices */
+#define SRST_MCORE		2
+#define SRST_CORE0		3
+#define SRST_CORE1		4
+#define SRST_MCORE_DBG		7
+#define SRST_CORE0_DBG		8
+#define SRST_CORE1_DBG		9
+#define SRST_CORE0_WDT		12
+#define SRST_CORE1_WDT		13
+#define SRST_STRC_SYS		14
+#define SRST_L2C		15
+
+#define SRST_CPU_AHB		17
+#define SRST_AHB2APB		19
+#define SRST_DMA1		20
+#define SRST_INTMEM		21
+#define SRST_ROM		22
+#define SRST_SPDIF		26
+#define SRST_TIMER0		27
+#define SRST_TIMER1		28
+#define SRST_EFUSE		30
+
+#define SRST_GPIO0		32
+#define SRST_GPIO1		33
+#define SRST_GPIO2		34
+#define SRST_GPIO3		35
+
+#define SRST_UART0		39
+#define SRST_UART1		40
+#define SRST_UART2		41
+#define SRST_UART3		42
+#define SRST_I2C0		43
+#define SRST_I2C1		44
+#define SRST_I2C2		45
+#define SRST_I2C3		46
+#define SRST_I2C4		47
+
+#define SRST_PWM0		48
+#define SRST_PWM1		49
+#define SRST_DAP_PO		50
+#define SRST_DAP		51
+#define SRST_DAP_SYS		52
+#define SRST_TPIU_ATB		53
+#define SRST_PMU_APB		54
+#define SRST_GRF		55
+#define SRST_PMU		56
+#define SRST_PERI_AXI		57
+#define SRST_PERI_AHB		58
+#define SRST_PERI_APB		59
+#define SRST_PERI_NIU		60
+#define SRST_CPU_PERI		61
+#define SRST_EMEM_PERI		62
+#define SRST_USB_PERI		63
+
+#define SRST_DMA2		64
+#define SRST_SMC		65
+#define SRST_MAC		66
+#define SRST_NANC0		68
+#define SRST_USBOTG0		69
+#define SRST_USBPHY0		70
+#define SRST_OTGC0		71
+#define SRST_USBOTG1		72
+#define SRST_USBPHY1		73
+#define SRST_OTGC1		74
+#define SRST_HSADC		76
+#define SRST_PIDFILTER		77
+#define SRST_DDR_MSCH		79
+
+#define SRST_TZPC		80
+#define SRST_SDMMC		81
+#define SRST_SDIO		82
+#define SRST_EMMC		83
+#define SRST_SPI0		84
+#define SRST_SPI1		85
+#define SRST_WDT		86
+#define SRST_SARADC		87
+#define SRST_DDRPHY		88
+#define SRST_DDRPHY_APB		89
+#define SRST_DDRCTL		90
+#define SRST_DDRCTL_APB		91
+#define SRST_DDRPUB		93
+
+#define SRST_VIO0_AXI		98
+#define SRST_VIO0_AHB		99
+#define SRST_LCDC0_AXI		100
+#define SRST_LCDC0_AHB		101
+#define SRST_LCDC0_DCLK		102
+#define SRST_LCDC1_AXI		103
+#define SRST_LCDC1_AHB		104
+#define SRST_LCDC1_DCLK		105
+#define SRST_IPP_AXI		106
+#define SRST_IPP_AHB		107
+#define SRST_RGA_AXI		108
+#define SRST_RGA_AHB		109
+#define SRST_CIF0		110
+
+#define SRST_VCODEC_AXI		112
+#define SRST_VCODEC_AHB		113
+#define SRST_VIO1_AXI		114
+#define SRST_VCODEC_CPU		115
+#define SRST_VCODEC_NIU		116
+#define SRST_GPU		120
+#define SRST_GPU_NIU		122
+#define SRST_TFUN_ATB		125
+#define SRST_TFUN_APB		126
+#define SRST_CTI4_APB		127
+
+#define SRST_TPIU_APB		128
+#define SRST_TRACE		129
+#define SRST_CORE_DBG		130
+#define SRST_DBG_APB		131
+#define SRST_CTI0		132
+#define SRST_CTI0_APB		133
+#define SRST_CTI1		134
+#define SRST_CTI1_APB		135
+#define SRST_PTM_CORE0		136
+#define SRST_PTM_CORE1		137
+#define SRST_PTM0		138
+#define SRST_PTM0_ATB		139
+#define SRST_PTM1		140
+#define SRST_PTM1_ATB		141
+#define SRST_CTM		142
+#define SRST_TS			143
diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h
new file mode 100644
index 0000000..9fac8ed
--- /dev/null
+++ b/include/dt-bindings/clock/rk3188-cru.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/rk3188-cru-common.h>
+
+/* soft-reset indices */
+#define SRST_PTM_CORE2		0
+#define SRST_PTM_CORE3		1
+#define SRST_CORE2		5
+#define SRST_CORE3		6
+#define SRST_CORE2_DBG		10
+#define SRST_CORE3_DBG		11
+
+#define SRST_TIMER2		16
+#define SRST_TIMER4		23
+#define SRST_I2S0		24
+#define SRST_TIMER5		25
+#define SRST_TIMER3		29
+#define SRST_TIMER6		31
+
+#define SRST_PTM3		36
+#define SRST_PTM3_ATB		37
+
+#define SRST_GPS		67
+#define SRST_HSICPHY		75
+#define SRST_TIMER		78
+
+#define SRST_PTM2		92
+#define SRST_CORE2_WDT		94
+#define SRST_CORE3_WDT		95
+
+#define SRST_PTM2_ATB		111
+
+#define SRST_HSIC		117
+#define SRST_CTI2		118
+#define SRST_CTI2_APB		119
+#define SRST_GPU_BRIDGE		121
+#define SRST_CTI3		123
+#define SRST_CTI3_APB		124
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
new file mode 100644
index 0000000..ebcb460
--- /dev/null
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* core clocks */
+#define PLL_APLL		1
+#define PLL_DPLL		2
+#define PLL_CPLL		3
+#define PLL_GPLL		4
+#define PLL_NPLL		5
+
+/* sclk gates (special clocks) */
+#define SCLK_GPU		64
+#define SCLK_SPI0		65
+#define SCLK_SPI1		66
+#define SCLK_SPI2		67
+#define SCLK_SDMMC		68
+#define SCLK_SDIO0		69
+#define SCLK_SDIO1		70
+#define SCLK_EMMC		71
+#define SCLK_TSADC		72
+#define SCLK_SARADC		73
+#define SCLK_PS2C		74
+#define SCLK_NANDC0		75
+#define SCLK_NANDC1		76
+#define SCLK_UART0		77
+#define SCLK_UART1		78
+#define SCLK_UART2		79
+#define SCLK_UART3		80
+#define SCLK_UART4		81
+#define SCLK_I2S0		82
+#define SCLK_SPDIF		83
+#define SCLK_SPDIF8CH		84
+#define SCLK_TIMER0		85
+#define SCLK_TIMER1		86
+#define SCLK_TIMER2		87
+#define SCLK_TIMER3		88
+#define SCLK_TIMER4		89
+#define SCLK_TIMER5		90
+#define SCLK_TIMER6		91
+#define SCLK_HSADC		92
+#define SCLK_OTGPHY0		93
+#define SCLK_OTGPHY1		94
+#define SCLK_OTGPHY2		95
+#define SCLK_OTG_ADP		96
+#define SCLK_HSICPHY480M	97
+#define SCLK_HSICPHY12M		98
+#define SCLK_MACREF		99
+#define SCLK_LCDC_PWM0		100
+#define SCLK_LCDC_PWM1		101
+#define SCLK_MAC_RX		102
+#define SCLK_MAC_TX		103
+
+#define DCLK_VOP0		190
+#define DCLK_VOP1		191
+
+/* aclk gates */
+#define ACLK_GPU		192
+#define ACLK_DMAC1		193
+#define ACLK_DMAC2		194
+#define ACLK_MMU		195
+#define ACLK_GMAC		196
+#define ACLK_VOP0		197
+#define ACLK_VOP1		198
+#define ACLK_CRYPTO		199
+#define ACLK_RGA		200
+
+/* pclk gates */
+#define PCLK_GPIO0		320
+#define PCLK_GPIO1		321
+#define PCLK_GPIO2		322
+#define PCLK_GPIO3		323
+#define PCLK_GPIO4		324
+#define PCLK_GPIO5		325
+#define PCLK_GPIO6		326
+#define PCLK_GPIO7		327
+#define PCLK_GPIO8		328
+#define PCLK_GRF		329
+#define PCLK_SGRF		330
+#define PCLK_PMU		331
+#define PCLK_I2C0		332
+#define PCLK_I2C1		333
+#define PCLK_I2C2		334
+#define PCLK_I2C3		335
+#define PCLK_I2C4		336
+#define PCLK_I2C5		337
+#define PCLK_SPI0		338
+#define PCLK_SPI1		339
+#define PCLK_SPI2		340
+#define PCLK_UART0		341
+#define PCLK_UART1		342
+#define PCLK_UART2		343
+#define PCLK_UART3		344
+#define PCLK_UART4		345
+#define PCLK_TSADC		346
+#define PCLK_SARADC		347
+#define PCLK_SIM		348
+#define PCLK_GMAC		349
+#define PCLK_PWM		350
+#define PCLK_RKPWM		351
+#define PCLK_PS2C		352
+#define PCLK_TIMER		353
+#define PCLK_TZPC		354
+
+/* hclk gates */
+#define HCLK_GPS		448
+#define HCLK_OTG0		449
+#define HCLK_USBHOST0		450
+#define HCLK_USBHOST1		451
+#define HCLK_HSIC		452
+#define HCLK_NANDC0		453
+#define HCLK_NANDC1		454
+#define HCLK_TSP		455
+#define HCLK_SDMMC		456
+#define HCLK_SDIO0		457
+#define HCLK_SDIO1		458
+#define HCLK_EMMC		459
+#define HCLK_HSADC		460
+#define HCLK_CRYPTO		461
+#define HCLK_I2S0		462
+#define HCLK_SPDIF		463
+#define HCLK_SPDIF8CH		464
+#define HCLK_VOP0		465
+#define HCLK_VOP1		466
+#define HCLK_ROM		467
+#define HCLK_IEP		468
+#define HCLK_ISP		469
+#define HCLK_RGA		470
+
+#define CLK_NR_CLKS		(HCLK_RGA + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0		0
+#define SRST_CORE1		1
+#define SRST_CORE2		2
+#define SRST_CORE3		3
+#define SRST_CORE0_PO		4
+#define SRST_CORE1_PO		5
+#define SRST_CORE2_PO		6
+#define SRST_CORE3_PO		7
+#define SRST_PDCORE_STRSYS	8
+#define SRST_PDBUS_STRSYS	9
+#define SRST_L2C		10
+#define SRST_TOPDBG		11
+#define SRST_CORE0_DBG		12
+#define SRST_CORE1_DBG		13
+#define SRST_CORE2_DBG		14
+#define SRST_CORE3_DBG		15
+
+#define SRST_PDBUG_AHB_ARBITOR	16
+#define SRST_EFUSE256		17
+#define SRST_DMAC1		18
+#define SRST_INTMEM		19
+#define SRST_ROM		20
+#define SRST_SPDIF8CH		21
+#define SRST_TIMER		22
+#define SRST_I2S0		23
+#define SRST_SPDIF		24
+#define SRST_TIMER0		25
+#define SRST_TIMER1		26
+#define SRST_TIMER2		27
+#define SRST_TIMER3		28
+#define SRST_TIMER4		29
+#define SRST_TIMER5		30
+#define SRST_EFUSE		31
+
+#define SRST_GPIO0		32
+#define SRST_GPIO1		33
+#define SRST_GPIO2		34
+#define SRST_GPIO3		35
+#define SRST_GPIO4		36
+#define SRST_GPIO5		37
+#define SRST_GPIO6		38
+#define SRST_GPIO7		39
+#define SRST_GPIO8		40
+#define SRST_I2C0		42
+#define SRST_I2C1		43
+#define SRST_I2C2		44
+#define SRST_I2C3		45
+#define SRST_I2C4		46
+#define SRST_I2C5		47
+
+#define SRST_DWPWM		48
+#define SRST_MMC_PERI		49
+#define SRST_PERIPH_MMU		50
+#define SRST_DAP		51
+#define SRST_DAP_SYS		52
+#define SRST_TPIU		53
+#define SRST_PMU_APB		54
+#define SRST_GRF		55
+#define SRST_PMU		56
+#define SRST_PERIPH_AXI		57
+#define SRST_PERIPH_AHB		58
+#define SRST_PERIPH_APB		59
+#define SRST_PERIPH_NIU		60
+#define SRST_PDPERI_AHB_ARBI	61
+#define SRST_EMEM		62
+#define SRST_USB_PERI		63
+
+#define SRST_DMAC2		64
+#define SRST_MAC		66
+#define SRST_GPS		67
+#define SRST_RKPWM		69
+#define SRST_CCP		71
+#define SRST_USBHOST0		72
+#define SRST_HSIC		73
+#define SRST_HSIC_AUX		74
+#define SRST_HSIC_PHY		75
+#define SRST_HSADC		76
+#define SRST_NANDC0		77
+#define SRST_NANDC1		78
+
+#define SRST_TZPC		80
+#define SRST_SPI0		83
+#define SRST_SPI1		84
+#define SRST_SPI2		85
+#define SRST_SARADC		87
+#define SRST_PDALIVE_NIU	88
+#define SRST_PDPMU_INTMEM	89
+#define SRST_PDPMU_NIU		90
+#define SRST_SGRF		91
+
+#define SRST_VIO_ARBI		96
+#define SRST_RGA_NIU		97
+#define SRST_VIO0_NIU_AXI	98
+#define SRST_VIO_NIU_AHB	99
+#define SRST_LCDC0_AXI		100
+#define SRST_LCDC0_AHB		101
+#define SRST_LCDC0_DCLK		102
+#define SRST_VIO1_NIU_AXI	103
+#define SRST_VIP		104
+#define SRST_RGA_CORE		105
+#define SRST_IEP_AXI		106
+#define SRST_IEP_AHB		107
+#define SRST_RGA_AXI		108
+#define SRST_RGA_AHB		109
+#define SRST_ISP		110
+#define SRST_EDP		111
+
+#define SRST_VCODEC_AXI		112
+#define SRST_VCODEC_AHB		113
+#define SRST_VIO_H2P		114
+#define SRST_MIPIDSI0		115
+#define SRST_MIPIDSI1		116
+#define SRST_MIPICSI		117
+#define SRST_LVDS_PHY		118
+#define SRST_LVDS_CON		119
+#define SRST_GPU		120
+#define SRST_HDMI		121
+#define SRST_CORE_PVTM		124
+#define SRST_GPU_PVTM		125
+
+#define SRST_MMC0		128
+#define SRST_SDIO0		129
+#define SRST_SDIO1		130
+#define SRST_EMMC		131
+#define SRST_USBOTG_AHB		132
+#define SRST_USBOTG_PHY		133
+#define SRST_USBOTG_CON		134
+#define SRST_USBHOST0_AHB	135
+#define SRST_USBHOST0_PHY	136
+#define SRST_USBHOST0_CON	137
+#define SRST_USBHOST1_AHB	138
+#define SRST_USBHOST1_PHY	139
+#define SRST_USBHOST1_CON	140
+#define SRST_USB_ADP		141
+#define SRST_ACC_EFUSE		142
diff --git a/include/dt-bindings/mfd/palmas.h b/include/dt-bindings/mfd/palmas.h
new file mode 100644
index 0000000..2c8ac48
--- /dev/null
+++ b/include/dt-bindings/mfd/palmas.h
@@ -0,0 +1,18 @@
+/*
+ * This header provides macros for Palmas device bindings.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_PALMAS_H__
+#define __DT_BINDINGS_PALMAS_H__
+
+/* External control pins */
+#define PALMAS_EXT_CONTROL_PIN_ENABLE1	1
+#define PALMAS_EXT_CONTROL_PIN_ENABLE2	2
+#define PALMAS_EXT_CONTROL_PIN_NSLEEP	3
+
+#endif /* __DT_BINDINGS_PALMAS_H__ */
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 002a285..3d33794 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,7 +30,8 @@
 #define MUX_MODE14	0xe
 #define MUX_MODE15	0xf
 
-#define PULL_ENA		(1 << 16)
+#define PULL_ENA		(0 << 16)
+#define PULL_DIS		(1 << 16)
 #define PULL_UP			(1 << 17)
 #define INPUT_EN		(1 << 18)
 #define SLEWCONTROL		(1 << 19)
@@ -38,10 +39,10 @@
 #define WAKEUP_EVENT		(1 << 25)
 
 /* Active pin states */
-#define PIN_OUTPUT		0
+#define PIN_OUTPUT		(0 | PULL_DIS)
 #define PIN_OUTPUT_PULLUP	(PIN_OUTPUT | PULL_ENA | PULL_UP)
 #define PIN_OUTPUT_PULLDOWN	(PIN_OUTPUT | PULL_ENA)
-#define PIN_INPUT		INPUT_EN
+#define PIN_INPUT		(INPUT_EN | PULL_DIS)
 #define PIN_INPUT_SLEW		(INPUT_EN | SLEWCONTROL)
 #define PIN_INPUT_PULLUP	(PULL_ENA | INPUT_EN | PULL_UP)
 #define PIN_INPUT_PULLDOWN	(PULL_ENA | INPUT_EN)
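
The hunk above flips the meaning of bit 16 on DRA7x pads: per the new
definitions it is a pull-disable bit, so the old PULL_ENA value of
(1 << 16) was actually switching the pulls off. A minimal stand-alone
sketch of the corrected encoding (the constants mirror the header; the
printout itself is purely illustrative):

	#include <stdio.h>

	/* mirror of the updated dra.h encoding: bit 16 set = pulls disabled */
	#define PULL_ENA	(0 << 16)
	#define PULL_DIS	(1 << 16)
	#define PULL_UP		(1 << 17)
	#define INPUT_EN	(1 << 18)

	#define PIN_INPUT		(INPUT_EN | PULL_DIS)
	#define PIN_INPUT_PULLUP	(PULL_ENA | INPUT_EN | PULL_UP)

	int main(void)
	{
		printf("PIN_INPUT        = 0x%x\n", PIN_INPUT);		/* 0x50000 */
		printf("PIN_INPUT_PULLUP = 0x%x\n", PIN_INPUT_PULLUP);	/* 0x60000 */
		return 0;
	}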
diff --git a/include/dt-bindings/reset/qcom,gcc-apq8084.h b/include/dt-bindings/reset/qcom,gcc-apq8084.h
new file mode 100644
index 0000000..527caaf
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-apq8084.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_APQ_GCC_8084_H
+#define _DT_BINDINGS_RESET_APQ_GCC_8084_H
+
+#define GCC_SYSTEM_NOC_BCR		0
+#define GCC_CONFIG_NOC_BCR		1
+#define GCC_PERIPH_NOC_BCR		2
+#define GCC_IMEM_BCR			3
+#define GCC_MMSS_BCR			4
+#define GCC_QDSS_BCR			5
+#define GCC_USB_30_BCR			6
+#define GCC_USB3_PHY_BCR		7
+#define GCC_USB_HS_HSIC_BCR		8
+#define GCC_USB_HS_BCR			9
+#define GCC_USB2A_PHY_BCR		10
+#define GCC_USB2B_PHY_BCR		11
+#define GCC_SDCC1_BCR			12
+#define GCC_SDCC2_BCR			13
+#define GCC_SDCC3_BCR			14
+#define GCC_SDCC4_BCR			15
+#define GCC_BLSP1_BCR			16
+#define GCC_BLSP1_QUP1_BCR		17
+#define GCC_BLSP1_UART1_BCR		18
+#define GCC_BLSP1_QUP2_BCR		19
+#define GCC_BLSP1_UART2_BCR		20
+#define GCC_BLSP1_QUP3_BCR		21
+#define GCC_BLSP1_UART3_BCR		22
+#define GCC_BLSP1_QUP4_BCR		23
+#define GCC_BLSP1_UART4_BCR		24
+#define GCC_BLSP1_QUP5_BCR		25
+#define GCC_BLSP1_UART5_BCR		26
+#define GCC_BLSP1_QUP6_BCR		27
+#define GCC_BLSP1_UART6_BCR		28
+#define GCC_BLSP2_BCR			29
+#define GCC_BLSP2_QUP1_BCR		30
+#define GCC_BLSP2_UART1_BCR		31
+#define GCC_BLSP2_QUP2_BCR		32
+#define GCC_BLSP2_UART2_BCR		33
+#define GCC_BLSP2_QUP3_BCR		34
+#define GCC_BLSP2_UART3_BCR		35
+#define GCC_BLSP2_QUP4_BCR		36
+#define GCC_BLSP2_UART4_BCR		37
+#define GCC_BLSP2_QUP5_BCR		38
+#define GCC_BLSP2_UART5_BCR		39
+#define GCC_BLSP2_QUP6_BCR		40
+#define GCC_BLSP2_UART6_BCR		41
+#define GCC_PDM_BCR			42
+#define GCC_PRNG_BCR			43
+#define GCC_BAM_DMA_BCR			44
+#define GCC_TSIF_BCR			45
+#define GCC_TCSR_BCR			46
+#define GCC_BOOT_ROM_BCR		47
+#define GCC_MSG_RAM_BCR			48
+#define GCC_TLMM_BCR			49
+#define GCC_MPM_BCR			50
+#define GCC_MPM_AHB_RESET		51
+#define GCC_MPM_NON_AHB_RESET		52
+#define GCC_SEC_CTRL_BCR		53
+#define GCC_SPMI_BCR			54
+#define GCC_SPDM_BCR			55
+#define GCC_CE1_BCR			56
+#define GCC_CE2_BCR			57
+#define GCC_BIMC_BCR			58
+#define GCC_SNOC_BUS_TIMEOUT0_BCR	59
+#define GCC_SNOC_BUS_TIMEOUT2_BCR	60
+#define GCC_PNOC_BUS_TIMEOUT0_BCR	61
+#define GCC_PNOC_BUS_TIMEOUT1_BCR	62
+#define GCC_PNOC_BUS_TIMEOUT2_BCR	63
+#define GCC_PNOC_BUS_TIMEOUT3_BCR	64
+#define GCC_PNOC_BUS_TIMEOUT4_BCR	65
+#define GCC_CNOC_BUS_TIMEOUT0_BCR	66
+#define GCC_CNOC_BUS_TIMEOUT1_BCR	67
+#define GCC_CNOC_BUS_TIMEOUT2_BCR	68
+#define GCC_CNOC_BUS_TIMEOUT3_BCR	69
+#define GCC_CNOC_BUS_TIMEOUT4_BCR	70
+#define GCC_CNOC_BUS_TIMEOUT5_BCR	71
+#define GCC_CNOC_BUS_TIMEOUT6_BCR	72
+#define GCC_DEHR_BCR			73
+#define GCC_RBCPR_BCR			74
+#define GCC_MSS_RESTART			75
+#define GCC_LPASS_RESTART		76
+#define GCC_WCSS_RESTART		77
+#define GCC_VENUS_RESTART		78
+#define GCC_COPSS_SMMU_BCR		79
+#define GCC_SPSS_BCR			80
+#define GCC_PCIE_0_BCR			81
+#define GCC_PCIE_0_PHY_BCR		82
+#define GCC_PCIE_1_BCR			83
+#define GCC_PCIE_1_PHY_BCR		84
+#define GCC_USB_30_SEC_BCR		85
+#define GCC_USB3_SEC_PHY_BCR		86
+#define GCC_SATA_BCR			87
+#define GCC_CE3_BCR			88
+#define GCC_UFS_BCR			89
+#define GCC_USB30_PHY_COM_BCR		90
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
new file mode 100644
index 0000000..0ad5ef9
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_806X_H
+#define _DT_BINDINGS_RESET_IPQ_806X_H
+
+#define QDSS_STM_RESET					0
+#define AFAB_SMPSS_S_RESET				1
+#define AFAB_SMPSS_M1_RESET				2
+#define AFAB_SMPSS_M0_RESET				3
+#define AFAB_EBI1_CH0_RESET				4
+#define AFAB_EBI1_CH1_RESET				5
+#define SFAB_ADM0_M0_RESET				6
+#define SFAB_ADM0_M1_RESET				7
+#define SFAB_ADM0_M2_RESET				8
+#define ADM0_C2_RESET					9
+#define ADM0_C1_RESET					10
+#define ADM0_C0_RESET					11
+#define ADM0_PBUS_RESET					12
+#define ADM0_RESET					13
+#define QDSS_CLKS_SW_RESET				14
+#define QDSS_POR_RESET					15
+#define QDSS_TSCTR_RESET				16
+#define QDSS_HRESET_RESET				17
+#define QDSS_AXI_RESET					18
+#define QDSS_DBG_RESET					19
+#define SFAB_PCIE_M_RESET				20
+#define SFAB_PCIE_S_RESET				21
+#define PCIE_EXT_RESET					22
+#define PCIE_PHY_RESET					23
+#define PCIE_PCI_RESET					24
+#define PCIE_POR_RESET					25
+#define PCIE_HCLK_RESET					26
+#define PCIE_ACLK_RESET					27
+#define SFAB_LPASS_RESET				28
+#define SFAB_AFAB_M_RESET				29
+#define AFAB_SFAB_M0_RESET				30
+#define AFAB_SFAB_M1_RESET				31
+#define SFAB_SATA_S_RESET				32
+#define SFAB_DFAB_M_RESET				33
+#define DFAB_SFAB_M_RESET				34
+#define DFAB_SWAY0_RESET				35
+#define DFAB_SWAY1_RESET				36
+#define DFAB_ARB0_RESET					37
+#define DFAB_ARB1_RESET					38
+#define PPSS_PROC_RESET					39
+#define PPSS_RESET					40
+#define DMA_BAM_RESET					41
+#define SPS_TIC_H_RESET					42
+#define SFAB_CFPB_M_RESET				43
+#define SFAB_CFPB_S_RESET				44
+#define TSIF_H_RESET					45
+#define CE1_H_RESET					46
+#define CE1_CORE_RESET					47
+#define CE1_SLEEP_RESET					48
+#define CE2_H_RESET					49
+#define CE2_CORE_RESET					50
+#define SFAB_SFPB_M_RESET				51
+#define SFAB_SFPB_S_RESET				52
+#define RPM_PROC_RESET					53
+#define PMIC_SSBI2_RESET				54
+#define SDC1_RESET					55
+#define SDC2_RESET					56
+#define SDC3_RESET					57
+#define SDC4_RESET					58
+#define USB_HS1_RESET					59
+#define USB_HSIC_RESET					60
+#define USB_FS1_XCVR_RESET				61
+#define USB_FS1_RESET					62
+#define GSBI1_RESET					63
+#define GSBI2_RESET					64
+#define GSBI3_RESET					65
+#define GSBI4_RESET					66
+#define GSBI5_RESET					67
+#define GSBI6_RESET					68
+#define GSBI7_RESET					69
+#define SPDM_RESET					70
+#define SEC_CTRL_RESET					71
+#define TLMM_H_RESET					72
+#define SFAB_SATA_M_RESET				73
+#define SATA_RESET					74
+#define TSSC_RESET					75
+#define PDM_RESET					76
+#define MPM_H_RESET					77
+#define MPM_RESET					78
+#define SFAB_SMPSS_S_RESET				79
+#define PRNG_RESET					80
+#define SFAB_CE3_M_RESET				81
+#define SFAB_CE3_S_RESET				82
+#define CE3_SLEEP_RESET					83
+#define PCIE_1_M_RESET					84
+#define PCIE_1_S_RESET					85
+#define PCIE_1_EXT_RESET				86
+#define PCIE_1_PHY_RESET				87
+#define PCIE_1_PCI_RESET				88
+#define PCIE_1_POR_RESET				89
+#define PCIE_1_HCLK_RESET				90
+#define PCIE_1_ACLK_RESET				91
+#define PCIE_2_M_RESET					92
+#define PCIE_2_S_RESET					93
+#define PCIE_2_EXT_RESET				94
+#define PCIE_2_PHY_RESET				95
+#define PCIE_2_PCI_RESET				96
+#define PCIE_2_POR_RESET				97
+#define PCIE_2_HCLK_RESET				98
+#define PCIE_2_ACLK_RESET				99
+#define SFAB_USB30_S_RESET				100
+#define SFAB_USB30_M_RESET				101
+#define USB30_0_PORT2_HS_PHY_RESET			102
+#define USB30_0_MASTER_RESET				103
+#define USB30_0_SLEEP_RESET				104
+#define USB30_0_UTMI_PHY_RESET				105
+#define USB30_0_POWERON_RESET				106
+#define USB30_0_PHY_RESET				107
+#define USB30_1_MASTER_RESET				108
+#define USB30_1_SLEEP_RESET				109
+#define USB30_1_UTMI_PHY_RESET				110
+#define USB30_1_POWERON_RESET				111
+#define USB30_1_PHY_RESET				112
+#define NSSFB0_RESET					113
+#define NSSFB1_RESET					114
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8960.h b/include/dt-bindings/reset/qcom,gcc-msm8960.h
index 07edd0e..47c8686 100644
--- a/include/dt-bindings/reset/qcom,gcc-msm8960.h
+++ b/include/dt-bindings/reset/qcom,gcc-msm8960.h
@@ -114,5 +114,21 @@
 #define SFAB_SMPSS_S_RESET				97
 #define PRNG_RESET					98
 #define RIVA_RESET					99
+#define USB_HS3_RESET					100
+#define USB_HS4_RESET					101
+#define CE3_RESET					102
+#define PCIE_EXT_PCI_RESET				103
+#define PCIE_PHY_RESET					104
+#define PCIE_PCI_RESET					105
+#define PCIE_POR_RESET					106
+#define PCIE_HCLK_RESET					107
+#define PCIE_ACLK_RESET					108
+#define CE3_H_RESET					109
+#define SFAB_CE3_M_RESET				110
+#define SFAB_CE3_S_RESET				111
+#define SATA_RESET					112
+#define CE3_SLEEP_RESET					113
+#define GSS_SLP_RESET					114
+#define GSS_RESET					115
 
 #endif
diff --git a/include/dt-bindings/reset/qcom,mmcc-apq8084.h b/include/dt-bindings/reset/qcom,mmcc-apq8084.h
new file mode 100644
index 0000000..c167139
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,mmcc-apq8084.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_APQ_MMCC_8084_H
+#define _DT_BINDINGS_RESET_APQ_MMCC_8084_H
+
+#define MMSS_SPDM_RESET			0
+#define MMSS_SPDM_RM_RESET		1
+#define VENUS0_RESET			2
+#define VPU_RESET			3
+#define MDSS_RESET			4
+#define AVSYNC_RESET			5
+#define CAMSS_PHY0_RESET		6
+#define CAMSS_PHY1_RESET		7
+#define CAMSS_PHY2_RESET		8
+#define CAMSS_CSI0_RESET		9
+#define CAMSS_CSI0PHY_RESET		10
+#define CAMSS_CSI0RDI_RESET		11
+#define CAMSS_CSI0PIX_RESET		12
+#define CAMSS_CSI1_RESET		13
+#define CAMSS_CSI1PHY_RESET		14
+#define CAMSS_CSI1RDI_RESET		15
+#define CAMSS_CSI1PIX_RESET		16
+#define CAMSS_CSI2_RESET		17
+#define CAMSS_CSI2PHY_RESET		18
+#define CAMSS_CSI2RDI_RESET		19
+#define CAMSS_CSI2PIX_RESET		20
+#define CAMSS_CSI3_RESET		21
+#define CAMSS_CSI3PHY_RESET		22
+#define CAMSS_CSI3RDI_RESET		23
+#define CAMSS_CSI3PIX_RESET		24
+#define CAMSS_ISPIF_RESET		25
+#define CAMSS_CCI_RESET			26
+#define CAMSS_MCLK0_RESET		27
+#define CAMSS_MCLK1_RESET		28
+#define CAMSS_MCLK2_RESET		29
+#define CAMSS_MCLK3_RESET		30
+#define CAMSS_GP0_RESET			31
+#define CAMSS_GP1_RESET			32
+#define CAMSS_TOP_RESET			33
+#define CAMSS_AHB_RESET			34
+#define CAMSS_MICRO_RESET		35
+#define CAMSS_JPEG_RESET		36
+#define CAMSS_VFE_RESET			37
+#define CAMSS_CSI_VFE0_RESET		38
+#define CAMSS_CSI_VFE1_RESET		39
+#define OXILI_RESET			40
+#define OXILICX_RESET			41
+#define OCMEMCX_RESET			42
+#define MMSS_RBCRP_RESET		43
+#define MMSSNOCAHB_RESET		44
+#define MMSSNOCAXI_RESET		45
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8960.h b/include/dt-bindings/reset/qcom,mmcc-msm8960.h
index ba36ec6..1174111 100644
--- a/include/dt-bindings/reset/qcom,mmcc-msm8960.h
+++ b/include/dt-bindings/reset/qcom,mmcc-msm8960.h
@@ -89,5 +89,13 @@
 #define CSI2_RESET					72
 #define CSI_RDI1_RESET					73
 #define CSI_RDI2_RESET					74
+#define GFX3D_AXI_RESET					75
+#define VCAP_AXI_RESET					76
+#define SMMU_VCAP_AHB_RESET				77
+#define VCAP_AHB_RESET					78
+#define CSI_RDI_RESET					79
+#define CSI_PIX_RESET					80
+#define VCAP_NPL_RESET					81
+#define VCAP_RESET					82
 
 #endif
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 6dfd51a..09a947e 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -43,10 +43,7 @@
 	struct platform_device *pdev);
 int ahci_platform_init_host(struct platform_device *pdev,
 			    struct ahci_host_priv *hpriv,
-			    const struct ata_port_info *pi_template,
-			    unsigned long host_flags,
-			    unsigned int force_port_map,
-			    unsigned int mask_port_map);
+			    const struct ata_port_info *pi_template);
 
 int ahci_platform_suspend_host(struct device *dev);
 int ahci_platform_resume_host(struct device *dev);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8a111dd..b5223c5 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -203,7 +203,15 @@
 	struct kernfs_node *kn;		/* cgroup kernfs entry */
 	struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
 
-	/* the bitmask of subsystems enabled on the child cgroups */
+	/*
+	 * The bitmask of subsystems enabled on the child cgroups.
+	 * ->subtree_control is the one configured through
+	 * "cgroup.subtree_control" while ->child_subsys_mask is the
+	 * effective one which may have more subsystems enabled.
+	 * A controller's knobs are made available iff that controller
+	 * is enabled in ->subtree_control.
+	 */
+	unsigned int subtree_control;
 	unsigned int child_subsys_mask;
 
 	/* Private pointers for each registered subsystem */
@@ -248,73 +256,9 @@
 
 /* cgroup_root->flags */
 enum {
-	/*
-	 * Unfortunately, cgroup core and various controllers are riddled
-	 * with idiosyncrasies and pointless options.  The following flag,
-	 * when set, will force sane behavior - some options are forced on,
-	 * others are disallowed, and some controllers will change their
-	 * hierarchical or other behaviors.
-	 *
-	 * The set of behaviors affected by this flag are still being
-	 * determined and developed and the mount option for this flag is
-	 * prefixed with __DEVEL__.  The prefix will be dropped once we
-	 * reach the point where all behaviors are compatible with the
-	 * planned unified hierarchy, which will automatically turn on this
-	 * flag.
-	 *
-	 * The followings are the behaviors currently affected this flag.
-	 *
-	 * - Mount options "noprefix", "xattr", "clone_children",
-	 *   "release_agent" and "name" are disallowed.
-	 *
-	 * - When mounting an existing superblock, mount options should
-	 *   match.
-	 *
-	 * - Remount is disallowed.
-	 *
-	 * - rename(2) is disallowed.
-	 *
-	 * - "tasks" is removed.  Everything should be at process
-	 *   granularity.  Use "cgroup.procs" instead.
-	 *
-	 * - "cgroup.procs" is not sorted.  pids will be unique unless they
-	 *   got recycled inbetween reads.
-	 *
-	 * - "release_agent" and "notify_on_release" are removed.
-	 *   Replacement notification mechanism will be implemented.
-	 *
-	 * - "cgroup.clone_children" is removed.
-	 *
-	 * - "cgroup.subtree_populated" is available.  Its value is 0 if
-	 *   the cgroup and its descendants contain no task; otherwise, 1.
-	 *   The file also generates kernfs notification which can be
-	 *   monitored through poll and [di]notify when the value of the
-	 *   file changes.
-	 *
-	 * - If mount is requested with sane_behavior but without any
-	 *   subsystem, the default unified hierarchy is mounted.
-	 *
-	 * - cpuset: tasks will be kept in empty cpusets when hotplug happens
-	 *   and take masks of ancestors with non-empty cpus/mems, instead of
-	 *   being moved to an ancestor.
-	 *
-	 * - cpuset: a task can be moved into an empty cpuset, and again it
-	 *   takes masks of ancestors.
-	 *
-	 * - memcg: use_hierarchy is on by default and the cgroup file for
-	 *   the flag is not created.
-	 *
-	 * - blkcg: blk-throttle becomes properly hierarchical.
-	 *
-	 * - debug: disallowed on the default hierarchy.
-	 */
-	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0),
-
+	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0), /* __DEVEL__sane_behavior specified */
 	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
 	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
-
-	/* mount options live below bit 16 */
-	CGRP_ROOT_OPTION_MASK	= (1 << 16) - 1,
 };
 
 /*
@@ -440,9 +384,11 @@
 enum {
 	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
 	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
-	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
 	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
-	CFTYPE_ONLY_ON_DFL	= (1 << 4),	/* only on default hierarchy */
+
+	/* internal flags, do not use outside cgroup core proper */
+	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
+	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
 };
 
 #define MAX_CFTYPE_NAME		64
@@ -526,20 +472,64 @@
 extern struct cgroup_root cgrp_dfl_root;
 extern struct css_set init_css_set;
 
+/**
+ * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
+ * @cgrp: the cgroup of interest
+ *
+ * The default hierarchy is the v2 interface of cgroup and this function
+ * can be used to test whether a cgroup is on the default hierarchy for
+ * cases where a subsystem should behave differently depending on the
+ * interface version.
+ *
+ * The set of behaviors which change on the default hierarchy are still
+ * being determined and the mount option is prefixed with __DEVEL__.
+ *
+ * List of changed behaviors:
+ *
+ * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
+ *   and "name" are disallowed.
+ *
+ * - When mounting an existing superblock, mount options should match.
+ *
+ * - Remount is disallowed.
+ *
+ * - rename(2) is disallowed.
+ *
+ * - "tasks" is removed.  Everything should be at process granularity.  Use
+ *   "cgroup.procs" instead.
+ *
+ * - "cgroup.procs" is not sorted.  pids will be unique unless they got
+ *   recycled in between reads.
+ *
+ * - "release_agent" and "notify_on_release" are removed.  Replacement
+ *   notification mechanism will be implemented.
+ *
+ * - "cgroup.clone_children" is removed.
+ *
+ * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
+ *   and its descendants contain no task; otherwise, 1.  The file also
+ *   generates kernfs notification which can be monitored through poll and
+ *   [di]notify when the value of the file changes.
+ *
+ * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
+ *   take masks of ancestors with non-empty cpus/mems, instead of being
+ *   moved to an ancestor.
+ *
+ * - cpuset: a task can be moved into an empty cpuset, and again it takes
+ *   masks of ancestors.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for the flag
+ *   is not created.
+ *
+ * - blkcg: blk-throttle becomes properly hierarchical.
+ *
+ * - debug: disallowed on the default hierarchy.
+ */
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
 	return cgrp->root == &cgrp_dfl_root;
 }
 
-/*
- * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
- * function can be called as long as @cgrp is accessible.
- */
-static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
-{
-	return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
-}
-
 /* no synchronization, the result can only be used as a hint */
 static inline bool cgroup_has_tasks(struct cgroup *cgrp)
 {
@@ -602,7 +592,8 @@
 
 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 
-int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
@@ -634,6 +625,7 @@
 	int (*css_online)(struct cgroup_subsys_state *css);
 	void (*css_offline)(struct cgroup_subsys_state *css);
 	void (*css_free)(struct cgroup_subsys_state *css);
+	void (*css_reset)(struct cgroup_subsys_state *css);
 
 	int (*can_attach)(struct cgroup_subsys_state *css,
 			  struct cgroup_taskset *tset);
@@ -682,8 +674,21 @@
 	 */
 	struct list_head cfts;
 
-	/* base cftypes, automatically registered with subsys itself */
-	struct cftype *base_cftypes;
+	/*
+	 * Base cftypes which are automatically registered.  The two can
+	 * point to the same array.
+	 */
+	struct cftype *dfl_cftypes;	/* for the default hierarchy */
+	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */
+
+	/*
+	 * A subsystem may depend on other subsystems.  When such a subsystem
+	 * is enabled on a cgroup, the depended-upon subsystems are enabled
+	 * together if available.  Subsystems enabled due to dependency are
+	 * not visible to userland until explicitly enabled.  The following
+	 * specifies the mask of subsystems that this one depends on.
+	 */
+	unsigned int depends_on;
 };
 
 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
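
With base_cftypes split into dfl_cftypes/legacy_cftypes and the new
depends_on mask, a controller's boilerplate changes shape. A hedged
sketch of hypothetical controller wiring ("foo" and all foo_* names are
invented; only the field usage reflects this header):

	static struct cftype foo_dfl_files[] = {
		{ .name = "weight", /* ... */ },
		{ }	/* terminator */
	};

	static struct cftype foo_legacy_files[] = {
		{ .name = "foo.weight", /* ... */ },
		{ }	/* terminator */
	};

	struct cgroup_subsys foo_cgrp_subsys = {
		.css_alloc	= foo_css_alloc,	/* hypothetical */
		.css_free	= foo_css_free,		/* hypothetical */
		.dfl_cftypes	= foo_dfl_files,	/* default hierarchy */
		.legacy_cftypes	= foo_legacy_files,	/* legacy hierarchies */
		/* implicitly enable a depended-upon controller with "foo" */
		.depends_on	= 1 << foo_helper_cgrp_id,	/* hypothetical */
	};

Files registered later at runtime go through cgroup_add_dfl_cftypes()
or cgroup_add_legacy_cftypes(), replacing the single
cgroup_add_cftypes() entry point removed above.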
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0c287db..411dd7e 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -619,5 +619,10 @@
 
 #endif	/* platform dependent I/O accessors */
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+				void *data, const struct file_operations *fops);
+#endif
+
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
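
clk_debugfs_add_file() lets a clock driver attach an extra file to the
per-clock debugfs directory maintained by the common clock framework. A
sketch under assumptions (foo_state_fops and the caller are invented):

	#ifdef CONFIG_DEBUG_FS
	static const struct file_operations foo_state_fops;	/* hypothetical */

	static void foo_clk_add_debugfs(struct clk *clk, void *drvdata)
	{
		/* creates a read-only "foo_state" file next to the
		 * framework's own per-clock entries */
		clk_debugfs_add_file(clk, "foo_state", 0444, drvdata,
				     &foo_state_fops);
	}
	#endif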
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
new file mode 100644
index 0000000..f3050e1
--- /dev/null
+++ b/include/linux/clk/clk-conf.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct device_node;
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
+#else
+static inline int of_clk_set_defaults(struct device_node *node,
+				      bool clk_supplier)
+{
+	return 0;
+}
+#endif
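
of_clk_set_defaults() applies clock configuration requested in the
device tree (assigned clock parents and rates) for a node. Roughly how
a bus core could call it before binding a driver (a sketch with
invented foo_* names and trimmed error handling):

	static int foo_bus_probe(struct device *dev)
	{
		int ret;

		/*
		 * Apply DT-assigned clock parents/rates for this node.
		 * clk_supplier is false: @dev consumes clocks rather
		 * than providing them.
		 */
		ret = of_clk_set_defaults(dev->of_node, false);
		if (ret < 0)
			return ret;

		return foo_driver_probe(dev);	/* hypothetical */
	}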
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b92eadf..d45e949 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -710,9 +710,9 @@
 
 static inline void ablkcipher_request_set_callback(
 	struct ablkcipher_request *req,
-	u32 flags, crypto_completion_t complete, void *data)
+	u32 flags, crypto_completion_t compl, void *data)
 {
-	req->base.complete = complete;
+	req->base.complete = compl;
 	req->base.data = data;
 	req->base.flags = flags;
 }
@@ -841,10 +841,10 @@
 
 static inline void aead_request_set_callback(struct aead_request *req,
 					     u32 flags,
-					     crypto_completion_t complete,
+					     crypto_completion_t compl,
 					     void *data)
 {
-	req->base.complete = complete;
+	req->base.complete = compl;
 	req->base.data = data;
 	req->base.flags = flags;
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e11d60c..2daccaf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -833,7 +833,7 @@
  *
  * Lockd stuffs a "host" pointer into this.
  */
-typedef struct files_struct *fl_owner_t;
+typedef void *fl_owner_t;
 
 struct file_lock_operations {
 	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 404a686..6bb5e3f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -33,8 +33,7 @@
  * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
  */
-#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
-	!ARCH_SUPPORTS_FTRACE_OPS
+#if !ARCH_SUPPORTS_FTRACE_OPS
 # define FTRACE_FORCE_LIST_FUNC 1
 #else
 # define FTRACE_FORCE_LIST_FUNC 0
@@ -118,17 +117,18 @@
 	ftrace_func_t			func;
 	struct ftrace_ops		*next;
 	unsigned long			flags;
-	int __percpu			*disabled;
 	void				*private;
+	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
+	int				nr_trampolines;
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
+	struct ftrace_hash		*tramp_hash;
 	struct mutex			regex_lock;
+	unsigned long			trampoline;
 #endif
 };
 
-extern int function_trace_stop;
-
 /*
  * Type of the current tracing.
  */
@@ -140,32 +140,6 @@
 /* Current tracing type, default is FTRACE_TYPE_ENTER */
 extern enum ftrace_tracing_type_t ftrace_tracing_type;
 
-/**
- * ftrace_stop - stop function tracer.
- *
- * A quick way to stop the function tracer. Note this an on off switch,
- * it is not something that is recursive like preempt_disable.
- * This does not disable the calling of mcount, it only stops the
- * calling of functions from mcount.
- */
-static inline void ftrace_stop(void)
-{
-	function_trace_stop = 1;
-}
-
-/**
- * ftrace_start - start the function tracer.
- *
- * This function is the inverse of ftrace_stop. This does not enable
- * the function tracing if the function tracer is disabled. This only
- * sets the function tracer flag to continue calling the functions
- * from mcount.
- */
-static inline void ftrace_start(void)
-{
-	function_trace_stop = 0;
-}
-
 /*
  * The ftrace_ops must be a static and should also
  * be read_mostly.  These functions do modify read_mostly variables
@@ -242,8 +216,6 @@
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
-static inline void ftrace_stop(void) { }
-static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -317,13 +289,20 @@
  * from tracing that function.
  */
 enum {
-	FTRACE_FL_ENABLED	= (1UL << 29),
+	FTRACE_FL_ENABLED	= (1UL << 31),
 	FTRACE_FL_REGS		= (1UL << 30),
-	FTRACE_FL_REGS_EN	= (1UL << 31)
+	FTRACE_FL_REGS_EN	= (1UL << 29),
+	FTRACE_FL_TRAMP		= (1UL << 28),
+	FTRACE_FL_TRAMP_EN	= (1UL << 27),
 };
 
-#define FTRACE_FL_MASK		(0x7UL << 29)
-#define FTRACE_REF_MAX		((1UL << 29) - 1)
+#define FTRACE_REF_MAX_SHIFT	27
+#define FTRACE_FL_BITS		5
+#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
+#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
+
+#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
 
 struct dyn_ftrace {
 	unsigned long		ip; /* address of mcount call-site */
@@ -431,6 +410,10 @@
 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
 #endif
 
+#ifndef FTRACE_GRAPH_ADDR
+#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
+#endif
+
 #ifndef FTRACE_REGS_ADDR
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
@@ -439,6 +422,16 @@
 #endif
 #endif
 
+/*
+ * If an arch would like functions that are only traced
+ * by the function graph tracer to jump directly to its own
+ * trampoline, it can define FTRACE_GRAPH_TRAMP_ADDR
+ * to be that address.
+ */
+#ifndef FTRACE_GRAPH_TRAMP_ADDR
+#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -736,6 +729,7 @@
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
+extern bool ftrace_graph_is_dead(void);
 extern void ftrace_graph_stop(void);
 
 /* The current handlers in use */
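
The reworked dyn_ftrace flag layout reserves the top five bits for
state (ENABLED, REGS, REGS_EN, TRAMP, TRAMP_EN) and leaves the low 27
bits as a counter of attached ftrace_ops. A small sketch of how
ftrace_rec_count() splits the word (the record and its values are made
up):

	struct dyn_ftrace rec = {
		/* two state flags plus a reference count of 3 */
		.flags = FTRACE_FL_ENABLED | FTRACE_FL_TRAMP | 3,
	};

	/* masks off everything at or above FTRACE_REF_MAX_SHIFT: yields 3 */
	unsigned long refs = ftrace_rec_count(&rec);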
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index cff3106..06c6faa 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -272,7 +272,6 @@
 	struct trace_event	event;
 	const char		*print_fmt;
 	struct event_filter	*filter;
-	struct list_head	*files;
 	void			*mod;
 	void			*data;
 	/*
@@ -404,8 +403,6 @@
 	ETT_EVENT_ENABLE	= (1 << 3),
 };
 
-extern void destroy_preds(struct ftrace_event_file *file);
-extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
 extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5c..a23c096 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6df7f9f..2bb4c4f3 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,12 +102,6 @@
 #define INIT_IDS
 #endif
 
-#ifdef CONFIG_RCU_BOOST
-#define INIT_TASK_RCU_BOOST()						\
-	.rcu_boost_mutex = NULL,
-#else
-#define INIT_TASK_RCU_BOOST()
-#endif
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_TREE_PREEMPT()					\
 	.rcu_blocked_node = NULL,
@@ -119,8 +113,7 @@
 	.rcu_read_lock_nesting = 0,					\
 	.rcu_read_unlock_special = 0,					\
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
-	INIT_TASK_RCU_TREE_PREEMPT()					\
-	INIT_TASK_RCU_BOOST()
+	INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644
index 0000000..03a4ea3
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR			0x0000
+#define GICD_TYPER			0x0004
+#define GICD_IIDR			0x0008
+#define GICD_STATUSR			0x0010
+#define GICD_SETSPI_NSR			0x0040
+#define GICD_CLRSPI_NSR			0x0048
+#define GICD_SETSPI_SR			0x0050
+#define GICD_CLRSPI_SR			0x0058
+#define GICD_SEIR			0x0068
+#define GICD_ISENABLER			0x0100
+#define GICD_ICENABLER			0x0180
+#define GICD_ISPENDR			0x0200
+#define GICD_ICPENDR			0x0280
+#define GICD_ISACTIVER			0x0300
+#define GICD_ICACTIVER			0x0380
+#define GICD_IPRIORITYR			0x0400
+#define GICD_ICFGR			0x0C00
+#define GICD_IROUTER			0x6000
+#define GICD_PIDR2			0xFFE8
+
+#define GICD_CTLR_RWP			(1U << 31)
+#define GICD_CTLR_ARE_NS		(1U << 4)
+#define GICD_CTLR_ENABLE_G1A		(1U << 1)
+#define GICD_CTLR_ENABLE_G1		(1U << 0)
+
+#define GICD_IROUTER_SPI_MODE_ONE	(0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY	(1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK		0xf0
+#define GIC_PIDR2_ARCH_GICv3		0x30
+#define GIC_PIDR2_ARCH_GICv4		0x40
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR			GICD_CTLR
+#define GICR_IIDR			0x0004
+#define GICR_TYPER			0x0008
+#define GICR_STATUSR			GICD_STATUSR
+#define GICR_WAKER			0x0014
+#define GICR_SETLPIR			0x0040
+#define GICR_CLRLPIR			0x0048
+#define GICR_SEIR			GICD_SEIR
+#define GICR_PROPBASER			0x0070
+#define GICR_PENDBASER			0x0078
+#define GICR_INVLPIR			0x00A0
+#define GICR_INVALLR			0x00B0
+#define GICR_SYNCR			0x00C0
+#define GICR_MOVLPIR			0x0100
+#define GICR_MOVALLR			0x0110
+#define GICR_PIDR2			GICD_PIDR2
+
+#define GICR_WAKER_ProcessorSleep	(1U << 1)
+#define GICR_WAKER_ChildrenAsleep	(1U << 2)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_ISENABLER0			GICD_ISENABLER
+#define GICR_ICENABLER0			GICD_ICENABLER
+#define GICR_ISPENDR0			GICD_ISPENDR
+#define GICR_ICPENDR0			GICD_ICPENDR
+#define GICR_ISACTIVER0			GICD_ISACTIVER
+#define GICR_ICACTIVER0			GICD_ICACTIVER
+#define GICR_IPRIORITYR0		GICD_IPRIORITYR
+#define GICR_ICFGR0			GICD_ICFGR
+
+#define GICR_TYPER_VLPIS		(1U << 1)
+#define GICR_TYPER_LAST			(1U << 4)
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << 1)
+#define ICC_CTLR_EL1_EOImode_drop	(1U << 1)
+#define ICC_SRE_EL1_SRE			(1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define ICH_LR_VIRTUAL_ID_MASK		((1UL << 32) - 1)
+
+#define ICH_LR_EOI			(1UL << 41)
+#define ICH_LR_GROUP			(1UL << 60)
+#define ICH_LR_STATE			(3UL << 62)
+#define ICH_LR_PENDING_BIT		(1UL << 62)
+#define ICH_LR_ACTIVE_BIT		(1UL << 63)
+
+#define ICH_MISR_EOI			(1 << 0)
+#define ICH_MISR_U			(1 << 1)
+
+#define ICH_HCR_EN			(1 << 0)
+#define ICH_HCR_UIE			(1 << 1)
+
+#define ICH_VMCR_CTLR_SHIFT		0
+#define ICH_VMCR_CTLR_MASK		(0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT		18
+#define ICH_VMCR_BPR1_MASK		(7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT		21
+#define ICH_VMCR_BPR0_MASK		(7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT		24
+#define ICH_VMCR_PMR_MASK		(0xffUL << ICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1			sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1			sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1			sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1			sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1			sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1			sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1_SPURIOUS		0x3ff
+
+#define ICC_SRE_EL2			sys_reg(3, 4, 12, 9, 5)
+
+#define ICC_SRE_EL2_SRE			(1 << 0)
+#define ICC_SRE_EL2_ENABLE		(1 << 3)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2			sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2			sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2			sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2			sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2			sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2			sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2			sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x)			sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x)			sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2			__LR0_EL2(0)
+#define ICH_LR1_EL2			__LR0_EL2(1)
+#define ICH_LR2_EL2			__LR0_EL2(2)
+#define ICH_LR3_EL2			__LR0_EL2(3)
+#define ICH_LR4_EL2			__LR0_EL2(4)
+#define ICH_LR5_EL2			__LR0_EL2(5)
+#define ICH_LR6_EL2			__LR0_EL2(6)
+#define ICH_LR7_EL2			__LR0_EL2(7)
+#define ICH_LR8_EL2			__LR8_EL2(0)
+#define ICH_LR9_EL2			__LR8_EL2(1)
+#define ICH_LR10_EL2			__LR8_EL2(2)
+#define ICH_LR11_EL2			__LR8_EL2(3)
+#define ICH_LR12_EL2			__LR8_EL2(4)
+#define ICH_LR13_EL2			__LR8_EL2(5)
+#define ICH_LR14_EL2			__LR8_EL2(6)
+#define ICH_LR15_EL2			__LR8_EL2(7)
+
+#define __AP0Rx_EL2(x)			sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2			__AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2			__AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2			__AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2			__AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x)			sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2			__AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2			__AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2			__AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2			__AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+static inline void gic_write_eoir(u64 irq)
+{
+	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+	isb();
+}
+
+#endif
+
+#endif
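
A note on the GICD_CTLR_RWP bit defined above: RWP (Register Write Pending) stays set until the distributor has absorbed a previous control-register write, so drivers poll it after such writes. A minimal sketch, assuming a mapped distributor base and the kernel's readl_relaxed() accessor (the function name is illustrative):

	static void gic_dist_wait_for_rwp(void __iomem *base)
	{
		/* Spin until the previous GICD_CTLR write has taken effect. */
		while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP)
			cpu_relax();
	}
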
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 7dcef33..13d5520 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -73,7 +73,6 @@
 struct kthread_work {
 	struct list_head	node;
 	kthread_work_func_t	func;
-	wait_queue_head_t	done;
 	struct kthread_worker	*worker;
 };
 
@@ -85,7 +84,6 @@
 #define KTHREAD_WORK_INIT(work, fn)	{				\
 	.node = LIST_HEAD_INIT((work).node),				\
 	.func = (fn),							\
-	.done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),		\
 	}
 
 #define DEFINE_KTHREAD_WORKER(worker)					\
@@ -95,22 +93,16 @@
 	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
 /*
- * kthread_worker.lock and kthread_work.done need their own lockdep class
- * keys if they are defined on stack with lockdep enabled.  Use the
- * following macros when defining them on stack.
+ * kthread_worker.lock needs its own lockdep class key when defined on
+ * stack with lockdep enabled.  Use the following macros in such cases.
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
 	({ init_kthread_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
 	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-# define KTHREAD_WORK_INIT_ONSTACK(work, fn)				\
-	({ init_kthread_work((&work), fn); work; })
-# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)				\
-	struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
 #endif
 
 extern void __init_kthread_worker(struct kthread_worker *worker,
@@ -127,7 +119,6 @@
 		memset((work), 0, sizeof(struct kthread_work));		\
 		INIT_LIST_HEAD(&(work)->node);				\
 		(work)->func = (fn);					\
-		init_waitqueue_head(&(work)->done);			\
 	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
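
With the done waitqueue removed, an on-stack or static kthread_work no longer needs a lockdep-special initializer; DEFINE_KTHREAD_WORK() (or init_kthread_work()) is sufficient everywhere. A hedged usage sketch, with illustrative names:

	static void my_work_fn(struct kthread_work *work)
	{
		/* executes in the worker thread's context */
	}

	static DEFINE_KTHREAD_WORKER(my_worker);
	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

	/* in init code: spawn the worker thread, then feed it work */
	struct task_struct *task;

	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);
	queue_kthread_work(&my_worker, &my_work);
	flush_kthread_work(&my_work);	/* wait for it to finish */
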
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 92a2f99..8103f32 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -25,7 +25,8 @@
 struct msi_desc {
 	struct {
 		__u8	is_msix	: 1;
-		__u8	multiple: 3;	/* log2 number of messages */
+		__u8	multiple: 3;	/* log2 num of messages allocated */
+		__u8	multi_cap : 3;	/* log2 num of messages supported */
 		__u8	maskbit	: 1;	/* mask-pending bit supported ? */
 		__u8	is_64	: 1;	/* Address size: 0=32bit 1=64bit */
 		__u8	pos;		/* Location of the msi capability */
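
Both multiple and the new multi_cap field are log2-encoded, so the actual message counts are powers of two. A hedged sketch, assuming a struct msi_desc *desc whose attribute block is named msi_attrib as in the PCI MSI code:

	unsigned int allocated = 1 << desc->msi_attrib.multiple;  /* vectors allocated */
	unsigned int supported = 1 << desc->msi_attrib.multi_cap; /* vectors the device supports */
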
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 42aa9b9..8d5535c 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -176,8 +176,4 @@
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef arch_mutex_cpu_relax
-# define arch_mutex_cpu_relax() cpu_relax()
-#endif
-
 #endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0511789..0ff360d 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -73,6 +73,8 @@
 				   int depth, void *data);
 
 extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
 
 extern const char *of_flat_dt_get_machine_name(void);
 extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
 extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
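
Splitting early_init_dt_scan() into a verify step and a scan step lets an architecture inspect or trim the flattened tree in between, which is what the new of_fdt_limit_memory() hook enables. A hedged sketch of the resulting early-boot flow (dt_virt and the limit of 8 are illustrative):

	if (!early_init_dt_verify(dt_virt))
		panic("bad device tree blob");
	of_fdt_limit_memory(8);		/* e.g. cap the /memory node to 8 ranges */
	early_init_dt_scan_nodes();
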
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 466bcd1..6ed3647 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -978,6 +978,8 @@
 int pci_probe_reset_bus(struct pci_bus *bus);
 int pci_reset_bus(struct pci_bus *bus);
 int pci_try_reset_bus(struct pci_bus *bus);
+void pci_reset_secondary_bus(struct pci_dev *dev);
+void pcibios_reset_secondary_bus(struct pci_dev *dev);
 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
 void pci_update_resource(struct pci_dev *dev, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
@@ -1186,7 +1188,6 @@
 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 void pci_msix_shutdown(struct pci_dev *dev);
 void pci_disable_msix(struct pci_dev *dev);
-void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 void pci_restore_msi_state(struct pci_dev *dev);
 int pci_msi_enabled(void);
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
@@ -1217,7 +1218,6 @@
 { return -ENOSYS; }
 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 static inline void pci_disable_msix(struct pci_dev *dev) { }
-static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { }
 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
 static inline int pci_msi_enabled(void) { return 0; }
 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7fa3173..6ed0bb7 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -6,6 +6,8 @@
  *	Do not add new entries to this file unless the definitions
  *	are shared between multiple drivers.
  */
+#ifndef _LINUX_PCI_IDS_H
+#define _LINUX_PCI_IDS_H
 
 /* Device classes and subclasses */
 
@@ -2968,3 +2970,5 @@
 #define PCI_DEVICE_ID_XEN_PLATFORM	0x0001
 
 #define PCI_VENDOR_ID_OCZ		0x1b85
+
+#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index dec01d6..cfd5604 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -1,6 +1,40 @@
+/*
+ * linux/percpu-defs.h - basic definitions for percpu areas
+ *
+ * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
+ *
+ * This file is separate from linux/percpu.h to avoid cyclic inclusion
+ * dependency from arch header files.  Only to be included from
+ * asm/percpu.h.
+ *
+ * This file includes macros necessary to declare percpu sections and
+ * variables, and definitions of percpu accessors and operations.  It
+ * should provide enough percpu features to arch header files even when
+ * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
+ */
+
 #ifndef _LINUX_PERCPU_DEFS_H
 #define _LINUX_PERCPU_DEFS_H
 
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION "..first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
 /*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
@@ -19,19 +53,6 @@
 	__attribute__((section(".discard"), unused))
 
 /*
- * Macro which verifies @ptr is a percpu pointer without evaluating
- * @ptr.  This is to be used in percpu accessors to verify that the
- * input parameter is a percpu pointer.
- *
- * + 0 is required in order to convert the pointer type from a
- * potential array type to a pointer to a single item of the array.
- */
-#define __verify_pcpu_ptr(ptr)	do {					\
-	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
-	(void)__vpp_verify;						\
-} while (0)
-
-/*
  * s390 and alpha modules require percpu variables to be defined as
  * weak to force the compiler to generate GOT based external
  * references for them.  This is necessary because percpu sections
@@ -164,4 +185,337 @@
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)
 #endif
 
+/*
+ * Accessors and operations.
+ */
+#ifndef __ASSEMBLY__
+
+/*
+ * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
+ * @ptr and is invoked once before a percpu area is accessed by all
+ * accessors and operations.  This is performed in the generic part of
+ * percpu and arch overrides don't need to worry about it; however, if an
+ * arch wants to implement an arch-specific percpu accessor or operation,
+ * it may use __verify_pcpu_ptr() to verify the parameters.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
+ */
+#define __verify_pcpu_ptr(ptr)						\
+do {									\
+	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
+	(void)__vpp_verify;						\
+} while (0)
+
+#ifdef CONFIG_SMP
+
+/*
+ * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
+ * to prevent the compiler from making incorrect assumptions about the
+ * pointer value.  The weird cast keeps both GCC and sparse happy.
+ */
+#define SHIFT_PERCPU_PTR(__p, __offset)					\
+	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+
+#define per_cpu_ptr(ptr, cpu)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
+})
+
+#define raw_cpu_ptr(ptr)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	arch_raw_cpu_ptr(ptr);						\
+})
+
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
+})
+#else
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#endif
+
+#else	/* CONFIG_SMP */
+
+#define VERIFY_PERCPU_PTR(__p)						\
+({									\
+	__verify_pcpu_ptr(__p);						\
+	(typeof(*(__p)) __kernel __force *)(__p);			\
+})
+
+#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
+#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
+#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
+
+#endif	/* CONFIG_SMP */
+
+#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
+#define __raw_get_cpu_var(var)	(*raw_cpu_ptr(&(var)))
+#define __get_cpu_var(var)	(*this_cpu_ptr(&(var)))
+
+/* keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
+
+/*
+ * Must be an lvalue. Since @var must be a simple identifier,
+ * we force a syntax error here if it isn't.
+ */
+#define get_cpu_var(var)						\
+(*({									\
+	preempt_disable();						\
+	this_cpu_ptr(&var);						\
+}))
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var)						\
+do {									\
+	(void)&(var);							\
+	preempt_enable();						\
+} while (0)
+
+#define get_cpu_ptr(var)						\
+({									\
+	preempt_disable();						\
+	this_cpu_ptr(var);						\
+})
+
+#define put_cpu_ptr(var)						\
+do {									\
+	(void)(var);							\
+	preempt_enable();						\
+} while (0)
+
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
+#define __pcpu_size_call_return(stem, variable)				\
+({									\
+	typeof(variable) pscr_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr_ret__ = stem##1(variable); break;			\
+	case 2: pscr_ret__ = stem##2(variable); break;			\
+	case 4: pscr_ret__ = stem##4(variable); break;			\
+	case 8: pscr_ret__ = stem##8(variable); break;			\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr_ret__;							\
+})
+
+#define __pcpu_size_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
+/*
+ * Special handling for cmpxchg_double.  cmpxchg_double is passed two
+ * percpu variables.  The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
+({									\
+	bool pdcrb_ret__;						\
+	__verify_pcpu_ptr(&(pcp1));					\
+	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
+	VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));	\
+	VM_BUG_ON((unsigned long)(&(pcp2)) !=				\
+		  (unsigned long)(&(pcp1)) + sizeof(pcp1));		\
+	switch(sizeof(pcp1)) {						\
+	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pdcrb_ret__;							\
+})
+
+#define __pcpu_size_call(stem, variable, ...)				\
+do {									\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+		case 1: stem##1(variable, __VA_ARGS__);break;		\
+		case 2: stem##2(variable, __VA_ARGS__);break;		\
+		case 4: stem##4(variable, __VA_ARGS__);break;		\
+		case 8: stem##8(variable, __VA_ARGS__);break;		\
+		default: 						\
+			__bad_size_call_parameter();break;		\
+	}								\
+} while (0)
+
+/*
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ *
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables.
+ *
+ * These operations guarantee exclusivity of access against other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The arch code can provide an optimized implementation by defining macros
+ * for certain scalar sizes, e.g. this_cpu_add_2() to provide per
+ * cpu atomic operations for 2 byte sized RMW actions. If arch code does
+ * not provide operations for a scalar size then the fallback in the
+ * generic code will be used.
+ *
+ * cmpxchg_double replaces two adjacent scalars at once.  The first two
+ * parameters are per cpu variables which have to be of the same size.  A
+ * truth value is returned to indicate success or failure (since a double
+ * register result is difficult to handle).  There is very limited hardware
+ * support for these operations, so only certain sizes may work.
+ */
+
+/*
+ * Operations for contexts where we do not want to do any checks for
+ * preemptions.  Unless strictly necessary, always use [__]this_cpu_*()
+ * instead.
+ *
+ * If there is no other protection through preempt disable and/or disabling
+ * interrupts, then one of these RMW operations can show unexpected behavior
+ * because the execution thread was rescheduled on another processor or an
+ * interrupt occurred and the same percpu variable was modified from the
+ * interrupt context.
+ */
+#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
+#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
+#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
+#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
+#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
+#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
+#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
+#define raw_cpu_cmpxchg(pcp, oval, nval) \
+	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
+#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
+#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
+#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
+#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
+
+/*
+ * Operations for contexts that are safe from preemption/interrupts.  These
+ * operations verify that preemption is disabled.
+ */
+#define __this_cpu_read(pcp)						\
+({									\
+	__this_cpu_preempt_check("read");				\
+	raw_cpu_read(pcp);						\
+})
+
+#define __this_cpu_write(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("write");				\
+	raw_cpu_write(pcp, val);					\
+})
+
+#define __this_cpu_add(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("add");				\
+	raw_cpu_add(pcp, val);						\
+})
+
+#define __this_cpu_and(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("and");				\
+	raw_cpu_and(pcp, val);						\
+})
+
+#define __this_cpu_or(pcp, val)						\
+({									\
+	__this_cpu_preempt_check("or");					\
+	raw_cpu_or(pcp, val);						\
+})
+
+#define __this_cpu_add_return(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("add_return");				\
+	raw_cpu_add_return(pcp, val);					\
+})
+
+#define __this_cpu_xchg(pcp, nval)					\
+({									\
+	__this_cpu_preempt_check("xchg");				\
+	raw_cpu_xchg(pcp, nval);					\
+})
+
+#define __this_cpu_cmpxchg(pcp, oval, nval)				\
+({									\
+	__this_cpu_preempt_check("cmpxchg");				\
+	raw_cpu_cmpxchg(pcp, oval, nval);				\
+})
+
+#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({	__this_cpu_preempt_check("cmpxchg_double");			\
+	raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2);	\
+})
+
+#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
+#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
+/*
+ * Operations with implied preemption protection.  These operations can be
+ * used without worrying about preemption.  Note that interrupts may still
+ * occur while an operation is in progress and if the interrupt modifies
+ * the variable too then RMW actions may not be reliable.
+ */
+#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
+#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
+#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
+#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
+#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
+#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
+#define this_cpu_cmpxchg(pcp, oval, nval) \
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
+#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#endif /* __ASSEMBLY__ */
 #endif /* _LINUX_PERCPU_DEFS_H */
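
The consolidated accessors above cover the common static per-cpu pattern end to end. A hedged sketch of a per-cpu counter (names are illustrative):

	static DEFINE_PER_CPU(unsigned long, my_hits);

	static void record_hit(void)
	{
		this_cpu_inc(my_hits);		/* preemption-safe RMW */
	}

	static unsigned long total_hits(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(my_hits, cpu);
		return sum;
	}
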
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 5d8920e..3dfbf23 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -57,11 +57,9 @@
 	atomic_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
-	 * mode; if set, then get/put will manipulate the atomic_t (this is a
-	 * hack because we need to keep the pointer around for
-	 * percpu_ref_kill_rcu())
+	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned __percpu	*pcpu_count;
+	unsigned long		pcpu_count_ptr;
 	percpu_ref_func_t	*release;
 	percpu_ref_func_t	*confirm_kill;
 	struct rcu_head		rcu;
@@ -69,7 +67,8 @@
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
 				 percpu_ref_func_t *release);
-void percpu_ref_cancel_init(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
+void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
 
@@ -88,12 +87,28 @@
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_STATUS_BITS	2
-#define PCPU_STATUS_MASK	((1 << PCPU_STATUS_BITS) - 1)
-#define PCPU_REF_PTR		0
 #define PCPU_REF_DEAD		1
 
-#define REF_STATUS(count)	(((unsigned long) count) & PCPU_STATUS_MASK)
+/*
+ * Internal helper.  Don't use outside percpu-refcount proper.  The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->pcpu_count is not NULL.
+ */
+static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
+				    unsigned __percpu **pcpu_countp)
+{
+	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+
+	/* paired with smp_store_release() in percpu_ref_reinit() */
+	smp_read_barrier_depends();
+
+	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+		return false;
+
+	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	return true;
+}
 
 /**
  * percpu_ref_get - increment a percpu refcount
@@ -107,9 +122,7 @@
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_inc(*pcpu_count);
 	else
 		atomic_inc(&ref->count);
@@ -133,9 +146,7 @@
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	} else {
@@ -168,9 +179,7 @@
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	}
@@ -193,9 +202,7 @@
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
@@ -203,4 +210,19 @@
 	rcu_read_unlock_sched();
 }
 
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ */
+static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+
+	if (__pcpu_ref_alive(ref, &pcpu_count))
+		return false;
+	return !atomic_read(&ref->count);
+}
+
 #endif
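
Taken together, the new interface gives a percpu_ref a clear lifecycle: init, get/put in percpu mode, kill to switch to atomic mode, release when the count drains, exit to free the percpu counter (with percpu_ref_reinit() available to resurrect a killed and drained ref). A hedged sketch:

	static void my_release(struct percpu_ref *ref)
	{
		/* last reference gone; safe to tear the object down */
	}

	struct percpu_ref ref;

	if (percpu_ref_init(&ref, my_release))
		return -ENOMEM;

	percpu_ref_get(&ref);		/* users take and drop references */
	percpu_ref_put(&ref);

	percpu_ref_kill(&ref);		/* atomic mode; drops the initial ref */
	/* once my_release() has run, percpu_ref_is_zero(&ref) is true and: */
	percpu_ref_exit(&ref);		/* the percpu counter can be freed */
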
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8419053..6f61b61 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -23,32 +23,6 @@
 	 PERCPU_MODULE_RESERVE)
 #endif
 
-/*
- * Must be an lvalue. Since @var must be a simple identifier,
- * we force a syntax error here if it isn't.
- */
-#define get_cpu_var(var) (*({				\
-	preempt_disable();				\
-	this_cpu_ptr(&var); }))
-
-/*
- * The weird & is necessary because sparse considers (void)(var) to be
- * a direct dereference of percpu variable (var).
- */
-#define put_cpu_var(var) do {				\
-	(void)&(var);					\
-	preempt_enable();				\
-} while (0)
-
-#define get_cpu_ptr(var) ({				\
-	preempt_disable();				\
-	this_cpu_ptr(var); })
-
-#define put_cpu_ptr(var) do {				\
-	(void)(var);					\
-	preempt_enable();				\
-} while (0)
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
@@ -140,17 +114,6 @@
 				pcpu_fc_populate_pte_fn_t populate_pte_fn);
 #endif
 
-/*
- * Use this to get to a cpu's version of the per-cpu object
- * dynamically allocated. Non-atomic access to the current CPU's
- * version should probably be combined with get_cpu()/put_cpu().
- */
-#ifdef CONFIG_SMP
-#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
-#else
-#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-#endif
-
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
@@ -166,640 +129,4 @@
 #define alloc_percpu(type)	\
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
-/*
- * Branching function to split up a function into a set of functions that
- * are called for different scalar sizes of the objects handled.
- */
-
-extern void __bad_size_call_parameter(void);
-
-#ifdef CONFIG_DEBUG_PREEMPT
-extern void __this_cpu_preempt_check(const char *op);
-#else
-static inline void __this_cpu_preempt_check(const char *op) { }
-#endif
-
-#define __pcpu_size_call_return(stem, variable)				\
-({	typeof(variable) pscr_ret__;					\
-	__verify_pcpu_ptr(&(variable));					\
-	switch(sizeof(variable)) {					\
-	case 1: pscr_ret__ = stem##1(variable);break;			\
-	case 2: pscr_ret__ = stem##2(variable);break;			\
-	case 4: pscr_ret__ = stem##4(variable);break;			\
-	case 8: pscr_ret__ = stem##8(variable);break;			\
-	default:							\
-		__bad_size_call_parameter();break;			\
-	}								\
-	pscr_ret__;							\
-})
-
-#define __pcpu_size_call_return2(stem, variable, ...)			\
-({									\
-	typeof(variable) pscr2_ret__;					\
-	__verify_pcpu_ptr(&(variable));					\
-	switch(sizeof(variable)) {					\
-	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
-	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
-	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
-	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
-	default:							\
-		__bad_size_call_parameter(); break;			\
-	}								\
-	pscr2_ret__;							\
-})
-
-/*
- * Special handling for cmpxchg_double.  cmpxchg_double is passed two
- * percpu variables.  The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
-({									\
-	bool pdcrb_ret__;						\
-	__verify_pcpu_ptr(&pcp1);					\
-	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
-	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
-	VM_BUG_ON((unsigned long)(&pcp2) !=				\
-		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
-	switch(sizeof(pcp1)) {						\
-	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
-	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
-	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
-	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
-	default:							\
-		__bad_size_call_parameter(); break;			\
-	}								\
-	pdcrb_ret__;							\
-})
-
-#define __pcpu_size_call(stem, variable, ...)				\
-do {									\
-	__verify_pcpu_ptr(&(variable));					\
-	switch(sizeof(variable)) {					\
-		case 1: stem##1(variable, __VA_ARGS__);break;		\
-		case 2: stem##2(variable, __VA_ARGS__);break;		\
-		case 4: stem##4(variable, __VA_ARGS__);break;		\
-		case 8: stem##8(variable, __VA_ARGS__);break;		\
-		default: 						\
-			__bad_size_call_parameter();break;		\
-	}								\
-} while (0)
-
-/*
- * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
- *
- * Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables.
- *
- * These operation guarantee exclusivity of access for other operations
- * on the *same* processor. The assumption is that per cpu data is only
- * accessed by a single processor instance (the current one).
- *
- * The first group is used for accesses that must be done in a
- * preemption safe way since we know that the context is not preempt
- * safe. Interrupts may occur. If the interrupt modifies the variable
- * too then RMW actions will not be reliable.
- *
- * The arch code can provide optimized functions in two ways:
- *
- * 1. Override the function completely. F.e. define this_cpu_add().
- *    The arch must then ensure that the various scalar format passed
- *    are handled correctly.
- *
- * 2. Provide functions for certain scalar sizes. F.e. provide
- *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
- *    sized RMW actions. If arch code does not provide operations for
- *    a scalar size then the fallback in the generic code will be
- *    used.
- */
-
-#define _this_cpu_generic_read(pcp)					\
-({	typeof(pcp) ret__;						\
-	preempt_disable();						\
-	ret__ = *this_cpu_ptr(&(pcp));					\
-	preempt_enable();						\
-	ret__;								\
-})
-
-#ifndef this_cpu_read
-# ifndef this_cpu_read_1
-#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
-# endif
-# ifndef this_cpu_read_2
-#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
-# endif
-# ifndef this_cpu_read_4
-#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
-# endif
-# ifndef this_cpu_read_8
-#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
-# endif
-# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
-#endif
-
-#define _this_cpu_generic_to_op(pcp, val, op)				\
-do {									\
-	unsigned long flags;						\
-	raw_local_irq_save(flags);					\
-	*raw_cpu_ptr(&(pcp)) op val;					\
-	raw_local_irq_restore(flags);					\
-} while (0)
-
-#ifndef this_cpu_write
-# ifndef this_cpu_write_1
-#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef this_cpu_write_2
-#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef this_cpu_write_4
-#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef this_cpu_write_8
-#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
-#endif
-
-#ifndef this_cpu_add
-# ifndef this_cpu_add_1
-#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef this_cpu_add_2
-#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef this_cpu_add_4
-#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef this_cpu_add_8
-#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef this_cpu_sub
-# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
-#endif
-
-#ifndef this_cpu_inc
-# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
-#endif
-
-#ifndef this_cpu_dec
-# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
-#endif
-
-#ifndef this_cpu_and
-# ifndef this_cpu_and_1
-#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef this_cpu_and_2
-#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef this_cpu_and_4
-#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef this_cpu_and_8
-#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
-#endif
-
-#ifndef this_cpu_or
-# ifndef this_cpu_or_1
-#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef this_cpu_or_2
-#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef this_cpu_or_4
-#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef this_cpu_or_8
-#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
-#endif
-
-#define _this_cpu_generic_add_return(pcp, val)				\
-({									\
-	typeof(pcp) ret__;						\
-	unsigned long flags;						\
-	raw_local_irq_save(flags);					\
-	raw_cpu_add(pcp, val);					\
-	ret__ = raw_cpu_read(pcp);					\
-	raw_local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef this_cpu_add_return
-# ifndef this_cpu_add_return_1
-#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_2
-#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_4
-#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_8
-#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#endif
-
-#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
-
-#define _this_cpu_generic_xchg(pcp, nval)				\
-({	typeof(pcp) ret__;						\
-	unsigned long flags;						\
-	raw_local_irq_save(flags);					\
-	ret__ = raw_cpu_read(pcp);					\
-	raw_cpu_write(pcp, nval);					\
-	raw_local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef this_cpu_xchg
-# ifndef this_cpu_xchg_1
-#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef this_cpu_xchg_2
-#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef this_cpu_xchg_4
-#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef this_cpu_xchg_8
-#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
-# endif
-# define this_cpu_xchg(pcp, nval)	\
-	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
-#endif
-
-#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
-({									\
-	typeof(pcp) ret__;						\
-	unsigned long flags;						\
-	raw_local_irq_save(flags);					\
-	ret__ = raw_cpu_read(pcp);					\
-	if (ret__ == (oval))						\
-		raw_cpu_write(pcp, nval);				\
-	raw_local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef this_cpu_cmpxchg
-# ifndef this_cpu_cmpxchg_1
-#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef this_cpu_cmpxchg_2
-#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef this_cpu_cmpxchg_4
-#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef this_cpu_cmpxchg_8
-#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define this_cpu_cmpxchg(pcp, oval, nval)	\
-	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
-#endif
-
-/*
- * cmpxchg_double replaces two adjacent scalars at once.  The first
- * two parameters are per cpu variables which have to be of the same
- * size.  A truth value is returned to indicate success or failure
- * (since a double register result is difficult to handle).  There is
- * very limited hardware support for these operations, so only certain
- * sizes may work.
- */
-#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-({									\
-	int ret__;							\
-	unsigned long flags;						\
-	raw_local_irq_save(flags);					\
-	ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
-			oval1, oval2, nval1, nval2);			\
-	raw_local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef this_cpu_cmpxchg_double
-# ifndef this_cpu_cmpxchg_double_1
-#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef this_cpu_cmpxchg_double_2
-#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef this_cpu_cmpxchg_double_4
-#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef this_cpu_cmpxchg_double_8
-#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
-/*
- * Generic percpu operations for contexts where we do not want to do
- * any checks for preemptiosn.
- *
- * If there is no other protection through preempt disable and/or
- * disabling interupts then one of these RMW operations can show unexpected
- * behavior because the execution thread was rescheduled on another processor
- * or an interrupt occurred and the same percpu variable was modified from
- * the interrupt context.
- */
-#ifndef raw_cpu_read
-# ifndef raw_cpu_read_1
-#  define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
-# endif
-# ifndef raw_cpu_read_2
-#  define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
-# endif
-# ifndef raw_cpu_read_4
-#  define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
-# endif
-# ifndef raw_cpu_read_8
-#  define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
-# endif
-# define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
-#endif
-
-#define raw_cpu_generic_to_op(pcp, val, op)				\
-do {									\
-	*raw_cpu_ptr(&(pcp)) op val;					\
-} while (0)
-
-
-#ifndef raw_cpu_write
-# ifndef raw_cpu_write_1
-#  define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef raw_cpu_write_2
-#  define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef raw_cpu_write_4
-#  define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef raw_cpu_write_8
-#  define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
-#endif
-
-#ifndef raw_cpu_add
-# ifndef raw_cpu_add_1
-#  define raw_cpu_add_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef raw_cpu_add_2
-#  define raw_cpu_add_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef raw_cpu_add_4
-#  define raw_cpu_add_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef raw_cpu_add_8
-#  define raw_cpu_add_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef raw_cpu_sub
-# define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(val))
-#endif
-
-#ifndef raw_cpu_inc
-# define raw_cpu_inc(pcp)		raw_cpu_add((pcp), 1)
-#endif
-
-#ifndef raw_cpu_dec
-# define raw_cpu_dec(pcp)		raw_cpu_sub((pcp), 1)
-#endif
-
-#ifndef raw_cpu_and
-# ifndef raw_cpu_and_1
-#  define raw_cpu_and_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef raw_cpu_and_2
-#  define raw_cpu_and_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef raw_cpu_and_4
-#  define raw_cpu_and_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef raw_cpu_and_8
-#  define raw_cpu_and_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
-#endif
-
-#ifndef raw_cpu_or
-# ifndef raw_cpu_or_1
-#  define raw_cpu_or_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef raw_cpu_or_2
-#  define raw_cpu_or_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef raw_cpu_or_4
-#  define raw_cpu_or_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef raw_cpu_or_8
-#  define raw_cpu_or_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
-#endif
-
-#define raw_cpu_generic_add_return(pcp, val)				\
-({									\
-	raw_cpu_add(pcp, val);						\
-	raw_cpu_read(pcp);						\
-})
-
-#ifndef raw_cpu_add_return
-# ifndef raw_cpu_add_return_1
-#  define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef raw_cpu_add_return_2
-#  define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef raw_cpu_add_return_4
-#  define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef raw_cpu_add_return_8
-#  define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
-# endif
-# define raw_cpu_add_return(pcp, val)	\
-	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
-#endif
-
-#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
-#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)
-
-#define raw_cpu_generic_xchg(pcp, nval)					\
-({	typeof(pcp) ret__;						\
-	ret__ = raw_cpu_read(pcp);					\
-	raw_cpu_write(pcp, nval);					\
-	ret__;								\
-})
-
-#ifndef raw_cpu_xchg
-# ifndef raw_cpu_xchg_1
-#  define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef raw_cpu_xchg_2
-#  define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef raw_cpu_xchg_4
-#  define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef raw_cpu_xchg_8
-#  define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
-# endif
-# define raw_cpu_xchg(pcp, nval)	\
-	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
-#endif
-
-#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
-({									\
-	typeof(pcp) ret__;						\
-	ret__ = raw_cpu_read(pcp);					\
-	if (ret__ == (oval))						\
-		raw_cpu_write(pcp, nval);				\
-	ret__;								\
-})
-
-#ifndef raw_cpu_cmpxchg
-# ifndef raw_cpu_cmpxchg_1
-#  define raw_cpu_cmpxchg_1(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef raw_cpu_cmpxchg_2
-#  define raw_cpu_cmpxchg_2(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef raw_cpu_cmpxchg_4
-#  define raw_cpu_cmpxchg_4(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef raw_cpu_cmpxchg_8
-#  define raw_cpu_cmpxchg_8(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define raw_cpu_cmpxchg(pcp, oval, nval)	\
-	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
-#endif
-
-#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-({									\
-	int __ret = 0;							\
-	if (raw_cpu_read(pcp1) == (oval1) &&				\
-			 raw_cpu_read(pcp2)  == (oval2)) {		\
-		raw_cpu_write(pcp1, (nval1));				\
-		raw_cpu_write(pcp2, (nval2));				\
-		__ret = 1;						\
-	}								\
-	(__ret);							\
-})
-
-#ifndef raw_cpu_cmpxchg_double
-# ifndef raw_cpu_cmpxchg_double_1
-#  define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef raw_cpu_cmpxchg_double_2
-#  define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef raw_cpu_cmpxchg_double_4
-#  define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef raw_cpu_cmpxchg_double_8
-#  define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
-/*
- * Generic percpu operations for context that are safe from preemption/interrupts.
- */
-#ifndef __this_cpu_read
-# define __this_cpu_read(pcp) \
-	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
-#endif
-
-#ifndef __this_cpu_write
-# define __this_cpu_write(pcp, val)					\
-do { __this_cpu_preempt_check("write");					\
-     __pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
-} while (0)
-#endif
-
-#ifndef __this_cpu_add
-# define __this_cpu_add(pcp, val)					 \
-do { __this_cpu_preempt_check("add");					\
-	__pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
-} while (0)
-#endif
-
-#ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
-#endif
-
-#ifndef __this_cpu_inc
-# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
-#endif
-
-#ifndef __this_cpu_dec
-# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
-#endif
-
-#ifndef __this_cpu_and
-# define __this_cpu_and(pcp, val)					\
-do { __this_cpu_preempt_check("and");					\
-	__pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
-} while (0)
-
-#endif
-
-#ifndef __this_cpu_or
-# define __this_cpu_or(pcp, val)					\
-do { __this_cpu_preempt_check("or");					\
-	__pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
-} while (0)
-#endif
-
-#ifndef __this_cpu_add_return
-# define __this_cpu_add_return(pcp, val)	\
-	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
-#endif
-
-#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
-
-#ifndef __this_cpu_xchg
-# define __this_cpu_xchg(pcp, nval)	\
-	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
-#endif
-
-#ifndef __this_cpu_cmpxchg
-# define __this_cpu_cmpxchg(pcp, oval, nval)	\
-	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
-#endif
-
-#ifndef __this_cpu_cmpxchg_double
-# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
-#endif
-
 #endif /* __LINUX_PERCPU_H */
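
get_cpu_ptr()/put_cpu_ptr() moved to percpu-defs.h unchanged: they still pair a preempt_disable()/preempt_enable() with the access. A hedged sketch for dynamically allocated per-cpu data:

	struct stats { u64 rx, tx; };
	struct stats __percpu *s;
	struct stats *st;

	s = alloc_percpu(struct stats);
	if (!s)
		return -ENOMEM;

	st = get_cpu_ptr(s);	/* disables preemption */
	st->rx++;		/* non-atomic update, safe on this CPU */
	put_cpu_ptr(s);		/* re-enables preemption */

	free_percpu(s);
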
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h
index c2049e3..748e716 100644
--- a/include/linux/platform_data/ata-samsung_cf.h
+++ b/include/linux/platform_data/ata-samsung_cf.h
@@ -29,7 +29,6 @@
 
 /* architecture-specific IDE configuration */
 extern void s3c64xx_ide_setup_gpio(void);
-extern void s5pc100_ide_setup_gpio(void);
 extern void s5pv210_ide_setup_gpio(void);
 
 #endif /*__ATA_SAMSUNG_CF_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6a94cc8..d231aa1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -826,15 +826,14 @@
  * read-side critical section that would block in a !PREEMPT kernel.
  * But if you want the full story, read on!
  *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
- * is illegal to block while in an RCU read-side critical section.  In
- * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
- * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
- * be preempted, but explicit blocking is illegal.  Finally, in preemptible
- * RCU implementations in real-time (with -rt patchset) kernel builds,
- * RCU read-side critical sections may be preempted and they may also
- * block, but only when acquiring spinlocks that are subject to priority
- * inheritance.
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * it is illegal to block while in an RCU read-side critical section.
+ * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
+ * kernel builds, RCU read-side critical sections may be preempted,
+ * but explicit blocking is illegal.  Finally, in preemptible RCU
+ * implementations in real-time (with -rt patchset) kernel builds, RCU
+ * read-side critical sections may be preempted and they may also block, but
+ * only when acquiring spinlocks that are subject to priority inheritance.
  */
 static inline void rcu_read_lock(void)
 {
@@ -858,6 +857,34 @@
 /**
  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
+ * In most situations, rcu_read_unlock() is immune from deadlock.
+ * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
+ * is responsible for deboosting, which it does via rt_mutex_unlock().
+ * Unfortunately, this function acquires the scheduler's runqueue and
+ * priority-inheritance spinlocks.  This means that deadlock could result
+ * if the caller of rcu_read_unlock() already holds one of these locks or
+ * any lock that is ever acquired while holding them.
+ *
+ * That said, RCU readers are never priority boosted unless they were
+ * preempted.  Therefore, one way to avoid deadlock is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with one of
+ * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
+ * a number of ways, for example, by invoking preempt_disable() before
+ * the critical section's outermost rcu_read_lock().
+ *
+ * Given that the set of locks acquired by rt_mutex_unlock() might change
+ * at any time, a somewhat more future-proof approach is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with irqs disabled.
+ * This approach relies on the fact that rt_mutex_unlock() currently only
+ * acquires irq-disabled locks.
+ *
+ * The second of these two approaches is best in most situations;
+ * however, the first approach can also be useful, at least to those
+ * developers willing to keep abreast of the set of locks acquired by
+ * rt_mutex_unlock().
+ *
  * See rcu_read_lock() for more information.
  */
 static inline void rcu_read_unlock(void)
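
The second approach in the comment above is easy to apply mechanically: disabling interrupts also disables preemption, so the reader can never be priority boosted, and the outermost rcu_read_unlock() therefore never calls into rt_mutex_unlock(). A hedged sketch:

	unsigned long flags;

	local_irq_save(flags);	/* no preemption, hence no boosting */
	rcu_read_lock();
	/* access RCU-protected data */
	rcu_read_unlock();	/* never needs to deboost */
	local_irq_restore(flags);
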
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 3aed8d7..1abba5c 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -90,11 +90,9 @@
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
-						int detect_deadlock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
-					struct hrtimer_sleeper *timeout,
-					int detect_deadlock);
+			       struct hrtimer_sleeper *timeout);
 
 extern int rt_mutex_trylock(struct rt_mutex *lock);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 02757d1..42cac4d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1270,9 +1270,6 @@
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -2007,9 +2004,6 @@
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 535f158..8cf3503 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -164,8 +164,6 @@
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = ACCESS_ONCE(s->sequence);
-
-	seqcount_lockdep_reader_access(s);
 	smp_rmb();
 	return ret & ~1;
 }
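
raw_seqcount_begin() is used in lockless fast paths, hence the dropped lockdep annotation. Its usual retry loop, as a hedged sketch:

	seqcount_t seq;		/* protects some shared data */
	unsigned start;

	do {
		start = raw_seqcount_begin(&seq);
		/* read the protected data */
	} while (read_seqcount_retry(&seq, start));

Because raw_seqcount_begin() strips the low bit of the sequence, a read that starts while a writer is active is guaranteed to retry.
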
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 8a4987f..0590523 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -12,6 +12,7 @@
 #include <linux/hrtimer.h>
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
+#include <linux/sched.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -162,6 +163,7 @@
 #ifdef CONFIG_NO_HZ_FULL
 extern bool tick_nohz_full_running;
 extern cpumask_var_t tick_nohz_full_mask;
+extern cpumask_var_t housekeeping_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
@@ -201,6 +203,24 @@
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
+static inline bool is_housekeeping_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		return cpumask_test_cpu(cpu, housekeeping_mask);
+#endif
+	return true;
+}
+
+static inline void housekeeping_affine(struct task_struct *t)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		set_cpus_allowed_ptr(t, housekeeping_mask);
+
+#endif
+}
+
 static inline void tick_nohz_full_check(void)
 {
 	if (tick_nohz_full_enabled())
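
housekeeping_affine() is intended for kernel threads that must not perturb nohz_full CPUs. A hedged sketch of a background thread pinning itself to the housekeeping set:

	static int my_bg_thread(void *unused)
	{
		housekeeping_affine(current);	/* stay off nohz_full CPUs */

		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}
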
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 1361169..ea6c9de 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -25,6 +25,21 @@
 	s->full = 0;
 }
 
+/**
+ * trace_seq_buffer_ptr - return pointer to next location in buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the pointer to the buffer where the next write to
+ * the buffer will happen. This is useful to save the location
+ * that is about to be written to and then return the result
+ * of that write.
+ */
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+	return s->buffer + s->len;
+}
+
 /*
  * Currently only defined when tracing is enabled.
  */
@@ -36,14 +51,13 @@
 extern int
 trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
 extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-				 size_t cnt);
+extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+			     int cnt);
 extern int trace_seq_puts(struct trace_seq *s, const char *str);
 extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
 extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
-				size_t len);
-extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+				unsigned int len);
 extern int trace_seq_path(struct trace_seq *s, const struct path *path);
 
 extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
@@ -71,8 +85,8 @@
 {
 	return 0;
 }
-static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-				 size_t cnt)
+static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+				    int cnt)
 {
 	return 0;
 }
@@ -85,19 +99,15 @@
 	return 0;
 }
 static inline int
-trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
 {
 	return 0;
 }
 static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
-				       size_t len)
+				       unsigned int len)
 {
 	return 0;
 }
-static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
-{
-	return NULL;
-}
 static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
 {
 	return 0;
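
A sketch of the pattern the new trace_seq_buffer_ptr() kernel-doc
describes: save the write position, emit text, then return the saved
start. The print function and its argument below are hypothetical:

	#include <linux/trace_seq.h>

	static const char *print_sample_event(struct trace_seq *s, u32 flags)
	{
		/* remember where this event's text starts */
		unsigned char *p = trace_seq_buffer_ptr(s);

		trace_seq_printf(s, "flags=0x%x", flags);
		trace_seq_putc(s, 0);	/* terminate the string */

		/* return the start of what was just written */
		return (const char *)p;
	}
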
diff --git a/include/net/ip.h b/include/net/ip.h
index 0e795df..7596eb2 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -309,16 +309,7 @@
 	}
 }
 
-#define IP_IDENTS_SZ 2048u
-extern atomic_t *ip_idents;
-
-static inline u32 ip_idents_reserve(u32 hash, int segs)
-{
-	atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
-
-	return atomic_add_return(segs, id_ptr) - segs;
-}
-
+u32 ip_idents_reserve(u32 hash, int segs);
 void __ip_select_ident(struct iphdr *iph, int segs);
 
 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index cf67147..3b9ff33 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -342,6 +342,7 @@
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE	   0x40000000
 
+#define AUDIT_ARCH_AARCH64	(EM_AARCH64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ALPHA	(EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM		(EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB	(EM_ARM)
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e11d8f1..9b744af 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -399,13 +399,18 @@
 	__u64 vapic_addr;
 };
 
-/* for KVM_SET_MPSTATE */
+/* for KVM_SET_MP_STATE */
 
+/* not all states are valid on all architectures */
 #define KVM_MP_STATE_RUNNABLE          0
 #define KVM_MP_STATE_UNINITIALIZED     1
 #define KVM_MP_STATE_INIT_RECEIVED     2
 #define KVM_MP_STATE_HALTED            3
 #define KVM_MP_STATE_SIPI_RECEIVED     4
+#define KVM_MP_STATE_STOPPED           5
+#define KVM_MP_STATE_CHECK_STOP        6
+#define KVM_MP_STATE_OPERATING         7
+#define KVM_MP_STATE_LOAD              8
 
 struct kvm_mp_state {
 	__u32 mp_state;
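
From userspace, the new mp_state values are driven through the existing
KVM_SET_MP_STATE vcpu ioctl. A minimal sketch, assuming an already-open
vcpu fd and omitting error handling:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int stop_vcpu(int vcpu_fd)
	{
		struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };

		/* not all states are valid on all architectures */
		return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
	}
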
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index a5af2a2..5c1aba1 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,6 +170,7 @@
 	unmap->dev_bus_addr = 0;
 }
 
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   void **__shared);
diff --git a/init/Kconfig b/init/Kconfig
index 9d76b99..41066e4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -505,7 +505,7 @@
 	def_bool TREE_PREEMPT_RCU
 	help
 	  This option enables preemptible-RCU code that is common between
-	  the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
+	  TREE_PREEMPT_RCU and, in the old days, TINY_PREEMPT_RCU.
 
 config RCU_STALL_COMMON
 	def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE )
@@ -737,7 +737,7 @@
 
 config RCU_NOCB_CPU_NONE
 	bool "No build_forced no-CBs CPUs"
-	depends on RCU_NOCB_CPU && !NO_HZ_FULL
+	depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
 	help
 	  This option does not force any of the CPUs to be no-CBs CPUs.
 	  Only CPUs designated by the rcu_nocbs= boot parameter will be
@@ -751,7 +751,7 @@
 
 config RCU_NOCB_CPU_ZERO
 	bool "CPU 0 is a build_forced no-CBs CPU"
-	depends on RCU_NOCB_CPU && !NO_HZ_FULL
+	depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
 	help
 	  This option forces CPU 0 to be a no-CBs CPU, so that its RCU
 	  callbacks are invoked by a per-CPU kthread whose name begins
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 70776ae..7dc8788 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -149,12 +149,14 @@
  */
 static bool cgrp_dfl_root_visible;
 
+/*
+ * Set by the boot param of the same name; makes subsystems with NULL
+ * ->dfl_cftypes use ->legacy_cftypes on the default hierarchy.
+ */
+static bool cgroup_legacy_files_on_dfl;
+
 /* some controllers are not supported in the default hierarchy */
-static const unsigned int cgrp_dfl_root_inhibit_ss_mask = 0
-#ifdef CONFIG_CGROUP_DEBUG
-	| (1 << debug_cgrp_id)
-#endif
-	;
+static unsigned int cgrp_dfl_root_inhibit_ss_mask;
 
 /* The list of hierarchy roots */
 
@@ -180,13 +182,15 @@
  */
 static int need_forkexit_callback __read_mostly;
 
-static struct cftype cgroup_base_files[];
+static struct cftype cgroup_dfl_base_files[];
+static struct cftype cgroup_legacy_base_files[];
 
 static void cgroup_put(struct cgroup *cgrp);
 static int rebind_subsystems(struct cgroup_root *dst_root,
 			     unsigned int ss_mask);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
-static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss);
+static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
+		      bool visible);
 static void css_release(struct percpu_ref *ref);
 static void kill_css(struct cgroup_subsys_state *css);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
@@ -1037,6 +1041,58 @@
 }
 
 /**
+ * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * @cgrp: the target cgroup
+ *
+ * On the default hierarchy, a subsystem may request other subsystems to be
+ * enabled together through its ->depends_on mask.  In such cases, more
+ * subsystems than specified in "cgroup.subtree_control" may be enabled.
+ *
+ * This function determines which subsystems need to be enabled given the
+ * current @cgrp->subtree_control and records it in
+ * @cgrp->child_subsys_mask.  The resulting mask is always a superset of
+ * @cgrp->subtree_control and follows the usual hierarchy rules.
+ */
+static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+{
+	struct cgroup *parent = cgroup_parent(cgrp);
+	unsigned int cur_ss_mask = cgrp->subtree_control;
+	struct cgroup_subsys *ss;
+	int ssid;
+
+	lockdep_assert_held(&cgroup_mutex);
+
+	if (!cgroup_on_dfl(cgrp)) {
+		cgrp->child_subsys_mask = cur_ss_mask;
+		return;
+	}
+
+	while (true) {
+		unsigned int new_ss_mask = cur_ss_mask;
+
+		for_each_subsys(ss, ssid)
+			if (cur_ss_mask & (1 << ssid))
+				new_ss_mask |= ss->depends_on;
+
+		/*
+		 * Mask out subsystems which aren't available.  This can
+		 * happen only if some depended-upon subsystems were bound
+		 * to non-default hierarchies.
+		 */
+		if (parent)
+			new_ss_mask &= parent->child_subsys_mask;
+		else
+			new_ss_mask &= cgrp->root->subsys_mask;
+
+		if (new_ss_mask == cur_ss_mask)
+			break;
+		cur_ss_mask = new_ss_mask;
+	}
+
+	cgrp->child_subsys_mask = cur_ss_mask;
+}
+
+/**
  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
  * @kn: the kernfs_node being serviced
  *
@@ -1208,12 +1264,15 @@
 		up_write(&css_set_rwsem);
 
 		src_root->subsys_mask &= ~(1 << ssid);
-		src_root->cgrp.child_subsys_mask &= ~(1 << ssid);
+		src_root->cgrp.subtree_control &= ~(1 << ssid);
+		cgroup_refresh_child_subsys_mask(&src_root->cgrp);
 
 		/* default hierarchy doesn't enable controllers by default */
 		dst_root->subsys_mask |= 1 << ssid;
-		if (dst_root != &cgrp_dfl_root)
-			dst_root->cgrp.child_subsys_mask |= 1 << ssid;
+		if (dst_root != &cgrp_dfl_root) {
+			dst_root->cgrp.subtree_control |= 1 << ssid;
+			cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
+		}
 
 		if (ss->bind)
 			ss->bind(css);
@@ -1233,8 +1292,6 @@
 	for_each_subsys(ss, ssid)
 		if (root->subsys_mask & (1 << ssid))
 			seq_printf(seq, ",%s", ss->name);
-	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
-		seq_puts(seq, ",sane_behavior");
 	if (root->flags & CGRP_ROOT_NOPREFIX)
 		seq_puts(seq, ",noprefix");
 	if (root->flags & CGRP_ROOT_XATTR)
@@ -1268,6 +1325,7 @@
 	bool all_ss = false, one_ss = false;
 	unsigned int mask = -1U;
 	struct cgroup_subsys *ss;
+	int nr_opts = 0;
 	int i;
 
 #ifdef CONFIG_CPUSETS
@@ -1277,6 +1335,8 @@
 	memset(opts, 0, sizeof(*opts));
 
 	while ((token = strsep(&o, ",")) != NULL) {
+		nr_opts++;
+
 		if (!*token)
 			return -EINVAL;
 		if (!strcmp(token, "none")) {
@@ -1361,37 +1421,33 @@
 			return -ENOENT;
 	}
 
-	/* Consistency checks */
-
 	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
 		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
-
-		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
-		    opts->cpuset_clone_children || opts->release_agent ||
-		    opts->name) {
-			pr_err("sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
+		if (nr_opts != 1) {
+			pr_err("sane_behavior: no other mount options allowed\n");
 			return -EINVAL;
 		}
-	} else {
-		/*
-		 * If the 'all' option was specified select all the
-		 * subsystems, otherwise if 'none', 'name=' and a subsystem
-		 * name options were not specified, let's default to 'all'
-		 */
-		if (all_ss || (!one_ss && !opts->none && !opts->name))
-			for_each_subsys(ss, i)
-				if (!ss->disabled)
-					opts->subsys_mask |= (1 << i);
-
-		/*
-		 * We either have to specify by name or by subsystems. (So
-		 * all empty hierarchies must have a name).
-		 */
-		if (!opts->subsys_mask && !opts->name)
-			return -EINVAL;
+		return 0;
 	}
 
 	/*
+	 * If the 'all' option was specified, select all the subsystems;
+	 * otherwise, if 'none', 'name=' and subsystem name options were
+	 * not specified, default to 'all'.
+	 */
+	if (all_ss || (!one_ss && !opts->none && !opts->name))
+		for_each_subsys(ss, i)
+			if (!ss->disabled)
+				opts->subsys_mask |= (1 << i);
+
+	/*
+	 * We either have to specify by name or by subsystems. (So all
+	 * empty hierarchies must have a name).
+	 */
+	if (!opts->subsys_mask && !opts->name)
+		return -EINVAL;
+
+	/*
 	 * Option noprefix was introduced just for backward compatibility
 	 * with the old cpuset, so we allow noprefix only if mounting just
 	 * the cpuset subsystem.
@@ -1399,7 +1455,6 @@
 	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
 		return -EINVAL;
 
-
 	/* Can't specify "none" and some subsystems */
 	if (opts->subsys_mask && opts->none)
 		return -EINVAL;
@@ -1414,8 +1469,8 @@
 	struct cgroup_sb_opts opts;
 	unsigned int added_mask, removed_mask;
 
-	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
-		pr_err("sane_behavior: remount is not allowed\n");
+	if (root == &cgrp_dfl_root) {
+		pr_err("remount is not allowed\n");
 		return -EINVAL;
 	}
 
@@ -1434,11 +1489,10 @@
 	removed_mask = root->subsys_mask & ~opts.subsys_mask;
 
 	/* Don't allow flags or name to change at remount */
-	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
+	if ((opts.flags ^ root->flags) ||
 	    (opts.name && strcmp(opts.name, root->name))) {
 		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
-		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
-		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
+		       opts.flags, opts.name ?: "", root->flags, root->name);
 		ret = -EINVAL;
 		goto out_unlock;
 	}
@@ -1563,6 +1617,7 @@
 {
 	LIST_HEAD(tmp_links);
 	struct cgroup *root_cgrp = &root->cgrp;
+	struct cftype *base_files;
 	struct css_set *cset;
 	int i, ret;
 
@@ -1600,7 +1655,12 @@
 	}
 	root_cgrp->kn = root->kf_root->kn;
 
-	ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
+	if (root == &cgrp_dfl_root)
+		base_files = cgroup_dfl_base_files;
+	else
+		base_files = cgroup_legacy_base_files;
+
+	ret = cgroup_addrm_files(root_cgrp, base_files, true);
 	if (ret)
 		goto destroy_root;
 
@@ -1638,7 +1698,7 @@
 exit_root_id:
 	cgroup_exit_root_id(root);
 cancel_ref:
-	percpu_ref_cancel_init(&root_cgrp->self.refcnt);
+	percpu_ref_exit(&root_cgrp->self.refcnt);
 out:
 	free_cgrp_cset_links(&tmp_links);
 	return ret;
@@ -1672,7 +1732,7 @@
 		goto out_unlock;
 
 	/* look for a matching existing root */
-	if (!opts.subsys_mask && !opts.none && !opts.name) {
+	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
 		cgrp_dfl_root_visible = true;
 		root = &cgrp_dfl_root;
 		cgroup_get(&root->cgrp);
@@ -1730,15 +1790,8 @@
 			goto out_unlock;
 		}
 
-		if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
-			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
-				pr_err("sane_behavior: new mount options should match the existing superblock\n");
-				ret = -EINVAL;
-				goto out_unlock;
-			} else {
-				pr_warn("new mount options do not match the existing superblock, will be ignored\n");
-			}
-		}
+		if (root->flags ^ opts.flags)
+			pr_warn("new mount options do not match the existing superblock, will be ignored\n");
 
 		/*
 		 * We want to reuse @root whose lifetime is governed by its
@@ -2457,9 +2510,7 @@
 
 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
 {
-	struct cgroup *cgrp = seq_css(seq)->cgroup;
-
-	seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
+	seq_puts(seq, "0\n");
 	return 0;
 }
 
@@ -2496,7 +2547,7 @@
 {
 	struct cgroup *cgrp = seq_css(seq)->cgroup;
 
-	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->child_subsys_mask);
+	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
 	return 0;
 }
 
@@ -2505,7 +2556,7 @@
 {
 	struct cgroup *cgrp = seq_css(seq)->cgroup;
 
-	cgroup_print_ss_mask(seq, cgrp->child_subsys_mask);
+	cgroup_print_ss_mask(seq, cgrp->subtree_control);
 	return 0;
 }
 
@@ -2611,6 +2662,7 @@
 					    loff_t off)
 {
 	unsigned int enable = 0, disable = 0;
+	unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
 	struct cgroup *cgrp, *child;
 	struct cgroup_subsys *ss;
 	char *tok;
@@ -2650,11 +2702,26 @@
 
 	for_each_subsys(ss, ssid) {
 		if (enable & (1 << ssid)) {
-			if (cgrp->child_subsys_mask & (1 << ssid)) {
+			if (cgrp->subtree_control & (1 << ssid)) {
 				enable &= ~(1 << ssid);
 				continue;
 			}
 
+			/* unavailable or not enabled on the parent? */
+			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
+			    (cgroup_parent(cgrp) &&
+			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
+				ret = -ENOENT;
+				goto out_unlock;
+			}
+
+			/*
+			 * @ss is already enabled through dependency and
+			 * we'll just make it visible.  Skip draining.
+			 */
+			if (cgrp->child_subsys_mask & (1 << ssid))
+				continue;
+
 			/*
 			 * Because css offlining is asynchronous, userland
 			 * might try to re-enable the same controller while
@@ -2677,23 +2744,15 @@
 
 				return restart_syscall();
 			}
-
-			/* unavailable or not enabled on the parent? */
-			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
-			    (cgroup_parent(cgrp) &&
-			     !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ssid)))) {
-				ret = -ENOENT;
-				goto out_unlock;
-			}
 		} else if (disable & (1 << ssid)) {
-			if (!(cgrp->child_subsys_mask & (1 << ssid))) {
+			if (!(cgrp->subtree_control & (1 << ssid))) {
 				disable &= ~(1 << ssid);
 				continue;
 			}
 
 			/* a child has it enabled? */
 			cgroup_for_each_live_child(child, cgrp) {
-				if (child->child_subsys_mask & (1 << ssid)) {
+				if (child->subtree_control & (1 << ssid)) {
 					ret = -EBUSY;
 					goto out_unlock;
 				}
@@ -2707,7 +2766,7 @@
 	}
 
 	/*
-	 * Except for the root, child_subsys_mask must be zero for a cgroup
+	 * Except for the root, subtree_control must be zero for a cgroup
 	 * with tasks so that child cgroups don't compete against tasks.
 	 */
 	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
@@ -2716,36 +2775,75 @@
 	}
 
 	/*
-	 * Create csses for enables and update child_subsys_mask.  This
-	 * changes cgroup_e_css() results which in turn makes the
-	 * subsequent cgroup_update_dfl_csses() associate all tasks in the
-	 * subtree to the updated csses.
+	 * Update subsys masks and calculate what needs to be done.  More
+	 * subsystems than specified may need to be enabled or disabled
+	 * depending on subsystem dependencies.
+	 */
+	cgrp->subtree_control |= enable;
+	cgrp->subtree_control &= ~disable;
+
+	old_ctrl = cgrp->child_subsys_mask;
+	cgroup_refresh_child_subsys_mask(cgrp);
+	new_ctrl = cgrp->child_subsys_mask;
+
+	css_enable = ~old_ctrl & new_ctrl;
+	css_disable = old_ctrl & ~new_ctrl;
+	enable |= css_enable;
+	disable |= css_disable;
+
+	/*
+	 * Create new csses or make the existing ones visible.  A css is
+	 * created invisible if it's being implicitly enabled through
+	 * dependency.  An invisible css is made visible when the userland
+	 * explicitly enables it.
 	 */
 	for_each_subsys(ss, ssid) {
 		if (!(enable & (1 << ssid)))
 			continue;
 
 		cgroup_for_each_live_child(child, cgrp) {
-			ret = create_css(child, ss);
+			if (css_enable & (1 << ssid))
+				ret = create_css(child, ss,
+					cgrp->subtree_control & (1 << ssid));
+			else
+				ret = cgroup_populate_dir(child, 1 << ssid);
 			if (ret)
 				goto err_undo_css;
 		}
 	}
 
-	cgrp->child_subsys_mask |= enable;
-	cgrp->child_subsys_mask &= ~disable;
-
+	/*
+	 * At this point, cgroup_e_css() results reflect the new csses
+	 * making the following cgroup_update_dfl_csses() properly update
+	 * css associations of all tasks in the subtree.
+	 */
 	ret = cgroup_update_dfl_csses(cgrp);
 	if (ret)
 		goto err_undo_css;
 
-	/* all tasks are now migrated away from the old csses, kill them */
+	/*
+	 * All tasks are migrated out of disabled csses.  Kill or hide
+	 * them.  A css is hidden when the userland requests it to be
+	 * disabled while other subsystems are still depending on it.  The
+	 * css must not actively control resources and be in the vanilla
+	 * state if it's made visible again later.  Controllers which may
+	 * be depended upon should provide ->css_reset() for this purpose.
+	 */
 	for_each_subsys(ss, ssid) {
 		if (!(disable & (1 << ssid)))
 			continue;
 
-		cgroup_for_each_live_child(child, cgrp)
-			kill_css(cgroup_css(child, ss));
+		cgroup_for_each_live_child(child, cgrp) {
+			struct cgroup_subsys_state *css = cgroup_css(child, ss);
+
+			if (css_disable & (1 << ssid)) {
+				kill_css(css);
+			} else {
+				cgroup_clear_dir(child, 1 << ssid);
+				if (ss->css_reset)
+					ss->css_reset(css);
+			}
+		}
 	}
 
 	kernfs_activate(cgrp->kn);
@@ -2755,8 +2853,9 @@
 	return ret ?: nbytes;
 
 err_undo_css:
-	cgrp->child_subsys_mask &= ~enable;
-	cgrp->child_subsys_mask |= disable;
+	cgrp->subtree_control &= ~enable;
+	cgrp->subtree_control |= disable;
+	cgroup_refresh_child_subsys_mask(cgrp);
 
 	for_each_subsys(ss, ssid) {
 		if (!(enable & (1 << ssid)))
@@ -2764,8 +2863,14 @@
 
 		cgroup_for_each_live_child(child, cgrp) {
 			struct cgroup_subsys_state *css = cgroup_css(child, ss);
-			if (css)
+
+			if (!css)
+				continue;
+
+			if (css_enable & (1 << ssid))
 				kill_css(css);
+			else
+				cgroup_clear_dir(child, 1 << ssid);
 		}
 	}
 	goto out_unlock;
@@ -2878,9 +2983,9 @@
 
 	/*
 	 * This isn't a proper migration and its usefulness is very
-	 * limited.  Disallow if sane_behavior.
+	 * limited.  Disallow on the default hierarchy.
 	 */
-	if (cgroup_sane_behavior(cgrp))
+	if (cgroup_on_dfl(cgrp))
 		return -EPERM;
 
 	/*
@@ -2964,9 +3069,9 @@
 
 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
 		/* does cft->flags tell us to skip this file on @cgrp? */
-		if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
+		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
 			continue;
-		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
+		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
 			continue;
 		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
 			continue;
@@ -3024,6 +3129,9 @@
 			kfree(cft->kf_ops);
 		cft->kf_ops = NULL;
 		cft->ss = NULL;
+
+		/* revert flags set by cgroup core while adding @cfts */
+		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
 	}
 }
 
@@ -3109,7 +3217,7 @@
  * function currently returns 0 as long as @cfts registration is successful
  * even if some file creation attempts on existing cgroups fail.
  */
-int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
 	int ret;
 
@@ -3135,6 +3243,40 @@
 }
 
 /**
+ * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
+ * @ss: target cgroup subsystem
+ * @cfts: zero-length name terminated array of cftypes
+ *
+ * Similar to cgroup_add_cftypes() but the added files are only used for
+ * the default hierarchy.
+ */
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+{
+	struct cftype *cft;
+
+	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+		cft->flags |= __CFTYPE_ONLY_ON_DFL;
+	return cgroup_add_cftypes(ss, cfts);
+}
+
+/**
+ * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
+ * @ss: target cgroup subsystem
+ * @cfts: zero-length name terminated array of cftypes
+ *
+ * Similar to cgroup_add_cftypes() but the added files are only used for
+ * the legacy hierarchies.
+ */
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+{
+	struct cftype *cft;
+
+	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+		cft->flags |= __CFTYPE_NOT_ON_DFL;
+	return cgroup_add_cftypes(ss, cfts);
+}
+
+/**
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
  *
@@ -3699,8 +3841,9 @@
  *
  * All this extra complexity was caused by the original implementation
  * committing to an entirely unnecessary property.  In the long term, we
- * want to do away with it.  Explicitly scramble sort order if
- * sane_behavior so that no such expectation exists in the new interface.
+ * want to do away with it.  Explicitly scramble sort order if on the
+ * default hierarchy so that no such expectation exists in the new
+ * interface.
  *
  * Scrambling is done by swapping every two consecutive bits, which is
  * non-identity one-to-one mapping which disturbs sort order sufficiently.
@@ -3715,7 +3858,7 @@
 
 static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
 {
-	if (cgroup_sane_behavior(cgrp))
+	if (cgroup_on_dfl(cgrp))
 		return pid_fry(pid);
 	else
 		return pid;
@@ -3818,7 +3961,7 @@
 	css_task_iter_end(&it);
 	length = n;
 	/* now sort & (if procs) strip out duplicates */
-	if (cgroup_sane_behavior(cgrp))
+	if (cgroup_on_dfl(cgrp))
 		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
 	else
 		sort(array, length, sizeof(pid_t), cmppid, NULL);
@@ -4040,7 +4183,43 @@
 	return 0;
 }
 
-static struct cftype cgroup_base_files[] = {
+/* cgroup core interface files for the default hierarchy */
+static struct cftype cgroup_dfl_base_files[] = {
+	{
+		.name = "cgroup.procs",
+		.seq_start = cgroup_pidlist_start,
+		.seq_next = cgroup_pidlist_next,
+		.seq_stop = cgroup_pidlist_stop,
+		.seq_show = cgroup_pidlist_show,
+		.private = CGROUP_FILE_PROCS,
+		.write = cgroup_procs_write,
+		.mode = S_IRUGO | S_IWUSR,
+	},
+	{
+		.name = "cgroup.controllers",
+		.flags = CFTYPE_ONLY_ON_ROOT,
+		.seq_show = cgroup_root_controllers_show,
+	},
+	{
+		.name = "cgroup.controllers",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_controllers_show,
+	},
+	{
+		.name = "cgroup.subtree_control",
+		.seq_show = cgroup_subtree_control_show,
+		.write = cgroup_subtree_control_write,
+	},
+	{
+		.name = "cgroup.populated",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_populated_show,
+	},
+	{ }	/* terminate */
+};
+
+/* cgroup core interface files for the legacy hierarchies */
+static struct cftype cgroup_legacy_base_files[] = {
 	{
 		.name = "cgroup.procs",
 		.seq_start = cgroup_pidlist_start,
@@ -4053,7 +4232,6 @@
 	},
 	{
 		.name = "cgroup.clone_children",
-		.flags = CFTYPE_INSANE,
 		.read_u64 = cgroup_clone_children_read,
 		.write_u64 = cgroup_clone_children_write,
 	},
@@ -4063,35 +4241,7 @@
 		.seq_show = cgroup_sane_behavior_show,
 	},
 	{
-		.name = "cgroup.controllers",
-		.flags = CFTYPE_ONLY_ON_DFL | CFTYPE_ONLY_ON_ROOT,
-		.seq_show = cgroup_root_controllers_show,
-	},
-	{
-		.name = "cgroup.controllers",
-		.flags = CFTYPE_ONLY_ON_DFL | CFTYPE_NOT_ON_ROOT,
-		.seq_show = cgroup_controllers_show,
-	},
-	{
-		.name = "cgroup.subtree_control",
-		.flags = CFTYPE_ONLY_ON_DFL,
-		.seq_show = cgroup_subtree_control_show,
-		.write = cgroup_subtree_control_write,
-	},
-	{
-		.name = "cgroup.populated",
-		.flags = CFTYPE_ONLY_ON_DFL | CFTYPE_NOT_ON_ROOT,
-		.seq_show = cgroup_populated_show,
-	},
-
-	/*
-	 * Historical crazy stuff.  These don't have "cgroup."  prefix and
-	 * don't exist if sane_behavior.  If you're depending on these, be
-	 * prepared to be burned.
-	 */
-	{
 		.name = "tasks",
-		.flags = CFTYPE_INSANE,		/* use "procs" instead */
 		.seq_start = cgroup_pidlist_start,
 		.seq_next = cgroup_pidlist_next,
 		.seq_stop = cgroup_pidlist_stop,
@@ -4102,13 +4252,12 @@
 	},
 	{
 		.name = "notify_on_release",
-		.flags = CFTYPE_INSANE,
 		.read_u64 = cgroup_read_notify_on_release,
 		.write_u64 = cgroup_write_notify_on_release,
 	},
 	{
 		.name = "release_agent",
-		.flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
+		.flags = CFTYPE_ONLY_ON_ROOT,
 		.seq_show = cgroup_release_agent_show,
 		.write = cgroup_release_agent_write,
 		.max_write_len = PATH_MAX - 1,
@@ -4175,6 +4324,8 @@
 		container_of(work, struct cgroup_subsys_state, destroy_work);
 	struct cgroup *cgrp = css->cgroup;
 
+	percpu_ref_exit(&css->refcnt);
+
 	if (css->ss) {
 		/* css free path */
 		if (css->parent)
@@ -4314,12 +4465,14 @@
  * create_css - create a cgroup_subsys_state
  * @cgrp: the cgroup new css will be associated with
  * @ss: the subsys of new css
+ * @visible: whether to create control knobs for the new css or not
  *
  * Create a new css associated with @cgrp - @ss pair.  On success, the new
- * css is online and installed in @cgrp with all interface files created.
- * Returns 0 on success, -errno on failure.
+ * css is online and installed in @cgrp with all interface files created if
+ * @visible.  Returns 0 on success, -errno on failure.
  */
-static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
+static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
+		      bool visible)
 {
 	struct cgroup *parent = cgroup_parent(cgrp);
 	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
@@ -4343,9 +4496,11 @@
 		goto err_free_percpu_ref;
 	css->id = err;
 
-	err = cgroup_populate_dir(cgrp, 1 << ss->id);
-	if (err)
-		goto err_free_id;
+	if (visible) {
+		err = cgroup_populate_dir(cgrp, 1 << ss->id);
+		if (err)
+			goto err_free_id;
+	}
 
 	/* @css is ready to be brought online now, make it visible */
 	list_add_tail_rcu(&css->sibling, &parent_css->children);
@@ -4372,7 +4527,7 @@
 err_free_id:
 	cgroup_idr_remove(&ss->css_idr, css->id);
 err_free_percpu_ref:
-	percpu_ref_cancel_init(&css->refcnt);
+	percpu_ref_exit(&css->refcnt);
 err_free_css:
 	call_rcu(&css->rcu_head, css_free_rcu_fn);
 	return err;
@@ -4385,6 +4540,7 @@
 	struct cgroup_root *root;
 	struct cgroup_subsys *ss;
 	struct kernfs_node *kn;
+	struct cftype *base_files;
 	int ssid, ret;
 
 	parent = cgroup_kn_lock_live(parent_kn);
@@ -4455,14 +4611,20 @@
 	if (ret)
 		goto out_destroy;
 
-	ret = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+	if (cgroup_on_dfl(cgrp))
+		base_files = cgroup_dfl_base_files;
+	else
+		base_files = cgroup_legacy_base_files;
+
+	ret = cgroup_addrm_files(cgrp, base_files, true);
 	if (ret)
 		goto out_destroy;
 
 	/* let's create and online css's */
 	for_each_subsys(ss, ssid) {
 		if (parent->child_subsys_mask & (1 << ssid)) {
-			ret = create_css(cgrp, ss);
+			ret = create_css(cgrp, ss,
+					 parent->subtree_control & (1 << ssid));
 			if (ret)
 				goto out_destroy;
 		}
@@ -4470,10 +4632,12 @@
 
 	/*
 	 * On the default hierarchy, a child doesn't automatically inherit
-	 * child_subsys_mask from the parent.  Each is configured manually.
+	 * subtree_control from the parent.  Each is configured manually.
 	 */
-	if (!cgroup_on_dfl(cgrp))
-		cgrp->child_subsys_mask = parent->child_subsys_mask;
+	if (!cgroup_on_dfl(cgrp)) {
+		cgrp->subtree_control = parent->subtree_control;
+		cgroup_refresh_child_subsys_mask(cgrp);
+	}
 
 	kernfs_activate(kn);
 
@@ -4483,7 +4647,7 @@
 out_free_id:
 	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_cancel_ref:
-	percpu_ref_cancel_init(&cgrp->self.refcnt);
+	percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
 	kfree(cgrp);
 out_unlock:
@@ -4736,8 +4900,7 @@
  */
 int __init cgroup_init_early(void)
 {
-	static struct cgroup_sb_opts __initdata opts =
-		{ .flags = CGRP_ROOT_SANE_BEHAVIOR };
+	static struct cgroup_sb_opts __initdata opts;
 	struct cgroup_subsys *ss;
 	int i;
 
@@ -4775,7 +4938,8 @@
 	unsigned long key;
 	int ssid, err;
 
-	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
+	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
+	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
 	mutex_lock(&cgroup_mutex);
 
@@ -4807,9 +4971,22 @@
 		 * disabled flag and cftype registration needs kmalloc,
 		 * both of which aren't available during early_init.
 		 */
-		if (!ss->disabled) {
-			cgrp_dfl_root.subsys_mask |= 1 << ss->id;
-			WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes));
+		if (ss->disabled)
+			continue;
+
+		cgrp_dfl_root.subsys_mask |= 1 << ss->id;
+
+		if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
+			ss->dfl_cftypes = ss->legacy_cftypes;
+
+		if (!ss->dfl_cftypes)
+			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
+
+		if (ss->dfl_cftypes == ss->legacy_cftypes) {
+			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
+		} else {
+			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
+			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
 		}
 	}
 
@@ -5205,6 +5382,14 @@
 }
 __setup("cgroup_disable=", cgroup_disable);
 
+static int __init cgroup_set_legacy_files_on_dfl(char *str)
+{
+	printk("cgroup: using legacy files on the default hierarchy\n");
+	cgroup_legacy_files_on_dfl = true;
+	return 0;
+}
+__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
+
 /**
  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
  * @dentry: directory dentry of interest
@@ -5399,6 +5584,6 @@
 struct cgroup_subsys debug_cgrp_subsys = {
 	.css_alloc = debug_css_alloc,
 	.css_free = debug_css_free,
-	.base_cftypes = debug_files,
+	.legacy_cftypes = debug_files,
 };
 #endif /* CONFIG_CGROUP_DEBUG */
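
With base_cftypes split into dfl_cftypes and legacy_cftypes above, a
controller that wants different interface files on the two hierarchy
types now fills in both pointers. A hedged sketch; the controller and
its files are hypothetical:

	#include <linux/cgroup.h>

	static struct cftype sample_dfl_files[] = {
		{ .name = "weight" /* , .read_u64 = ..., .write_u64 = ... */ },
		{ }	/* terminate */
	};

	static struct cftype sample_legacy_files[] = {
		{ .name = "shares" /* , .read_u64 = ..., .write_u64 = ... */ },
		{ }	/* terminate */
	};

	struct cgroup_subsys sample_cgrp_subsys = {
		/* .css_alloc, .css_free and friends elided */
		.dfl_cftypes	= sample_dfl_files,
		.legacy_cftypes	= sample_legacy_files,
	};
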
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index a79e40f..92b98cc 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -480,5 +480,5 @@
 	.css_free	= freezer_css_free,
 	.attach		= freezer_attach,
 	.fork		= freezer_fork,
-	.base_cftypes	= files,
+	.legacy_cftypes	= files,
 };
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 116a416..22874d7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -76,8 +76,34 @@
 	struct cgroup_subsys_state css;
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
-	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
-	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
+
+	/*
+	 * On default hierarchy:
+	 *
+	 * The user-configured masks can only be changed by writing to
+	 * cpuset.cpus and cpuset.mems, and won't be limited by the
+	 * parent masks.
+	 *
+	 * The effective masks are the real masks that apply to the tasks
+	 * in the cpuset. They may be changed if the configured masks are
+	 * changed or hotplug happens.
+	 *
+	 * effective_mask == configured_mask & parent's effective_mask,
+	 * and if it ends up empty, it will inherit the parent's mask.
+	 *
+	 *
+	 * On legacy hierarchy:
+	 *
+	 * The user-configured masks are always the same as the effective masks.
+	 */
+
+	/* user-configured CPUs and Memory Nodes allowed to tasks */
+	cpumask_var_t cpus_allowed;
+	nodemask_t mems_allowed;
+
+	/* effective CPUs and Memory Nodes allowed to tasks */
+	cpumask_var_t effective_cpus;
+	nodemask_t effective_mems;
 
 	/*
 	 * This is old Memory Nodes tasks took on.
@@ -307,9 +333,9 @@
  */
 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
-	while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
+	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
 		cs = parent_cs(cs);
-	cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
+	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
 }
 
 /*
@@ -325,9 +351,9 @@
  */
 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
-	while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
+	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
 		cs = parent_cs(cs);
-	nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
+	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
 }
 
 /*
@@ -376,13 +402,20 @@
 	if (!trial)
 		return NULL;
 
-	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
-		kfree(trial);
-		return NULL;
-	}
-	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
+		goto free_cs;
+	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
+		goto free_cpus;
 
+	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
+
+free_cpus:
+	free_cpumask_var(trial->cpus_allowed);
+free_cs:
+	kfree(trial);
+	return NULL;
 }
 
 /**
@@ -391,6 +424,7 @@
  */
 static void free_trial_cpuset(struct cpuset *trial)
 {
+	free_cpumask_var(trial->effective_cpus);
 	free_cpumask_var(trial->cpus_allowed);
 	kfree(trial);
 }
@@ -436,9 +470,9 @@
 
 	par = parent_cs(cur);
 
-	/* We must be a subset of our parent cpuset */
+	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
 	ret = -EACCES;
-	if (!is_cpuset_subset(trial, par))
+	if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
 		goto out;
 
 	/*
@@ -480,11 +514,11 @@
 #ifdef CONFIG_SMP
 /*
  * Helper routine for generate_sched_domains().
- * Do cpusets a, b have overlapping cpus_allowed masks?
+ * Do cpusets a, b have overlapping effective cpus_allowed masks?
  */
 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 {
-	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
+	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
 }
 
 static void
@@ -601,7 +635,7 @@
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
-		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
+		cpumask_copy(doms[0], top_cpuset.effective_cpus);
 
 		goto done;
 	}
@@ -705,7 +739,7 @@
 			struct cpuset *b = csa[j];
 
 			if (apn == b->pn) {
-				cpumask_or(dp, dp, b->cpus_allowed);
+				cpumask_or(dp, dp, b->effective_cpus);
 				if (dattr)
 					update_domain_attr_tree(dattr + nslot, b);
 
@@ -757,7 +791,7 @@
 	 * passing doms with offlined cpu to partition_sched_domains().
 	 * Anyways, hotplug work item will rebuild sched domains.
 	 */
-	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
+	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
 		goto out;
 
 	/* Generate domain masks and attrs */
@@ -781,45 +815,6 @@
 	mutex_unlock(&cpuset_mutex);
 }
 
-/*
- * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
- * @cs: the cpuset in interest
- *
- * A cpuset's effective cpumask is the cpumask of the nearest ancestor
- * with non-empty cpus. We use effective cpumask whenever:
- * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
- *   if the cpuset they reside in has no cpus)
- * - we want to retrieve task_cs(tsk)'s cpus_allowed.
- *
- * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
- * exception. See comments there.
- */
-static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
-{
-	while (cpumask_empty(cs->cpus_allowed))
-		cs = parent_cs(cs);
-	return cs;
-}
-
-/*
- * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
- * @cs: the cpuset in interest
- *
- * A cpuset's effective nodemask is the nodemask of the nearest ancestor
- * with non-empty memss. We use effective nodemask whenever:
- * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
- *   if the cpuset they reside in has no mems)
- * - we want to retrieve task_cs(tsk)'s mems_allowed.
- *
- * Called with cpuset_mutex held.
- */
-static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
-{
-	while (nodes_empty(cs->mems_allowed))
-		cs = parent_cs(cs);
-	return cs;
-}
-
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -830,53 +825,80 @@
  */
 static void update_tasks_cpumask(struct cpuset *cs)
 {
-	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 	struct css_task_iter it;
 	struct task_struct *task;
 
 	css_task_iter_start(&cs->css, &it);
 	while ((task = css_task_iter_next(&it)))
-		set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed);
+		set_cpus_allowed_ptr(task, cs->effective_cpus);
 	css_task_iter_end(&it);
 }
 
 /*
- * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
- * @root_cs: the root cpuset of the hierarchy
- * @update_root: update root cpuset or not?
+ * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
+ * @cs: the cpuset to consider
+ * @new_cpus: temp variable for calculating new effective_cpus
  *
- * This will update cpumasks of tasks in @root_cs and all other empty cpusets
- * which take on cpumask of @root_cs.
+ * When the configured cpumask is changed, the effective cpumasks of this
+ * cpuset and all its descendants need to be updated.
+ *
+ * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
+static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
+	bool need_rebuild_sched_domains = false;
 
 	rcu_read_lock();
-	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs) {
-			if (!update_root)
-				continue;
-		} else {
-			/* skip the whole subtree if @cp have some CPU */
-			if (!cpumask_empty(cp->cpus_allowed)) {
-				pos_css = css_rightmost_descendant(pos_css);
-				continue;
-			}
+	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+		struct cpuset *parent = parent_cs(cp);
+
+		cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);
+
+		/*
+		 * If it becomes empty, inherit the effective mask of the
+		 * parent, which is guaranteed to have some CPUs.
+		 */
+		if (cpumask_empty(new_cpus))
+			cpumask_copy(new_cpus, parent->effective_cpus);
+
+		/* Skip the whole subtree if the cpumask remains the same. */
+		if (cpumask_equal(new_cpus, cp->effective_cpus)) {
+			pos_css = css_rightmost_descendant(pos_css);
+			continue;
 		}
+
 		if (!css_tryget_online(&cp->css))
 			continue;
 		rcu_read_unlock();
 
+		mutex_lock(&callback_mutex);
+		cpumask_copy(cp->effective_cpus, new_cpus);
+		mutex_unlock(&callback_mutex);
+
+		WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+
 		update_tasks_cpumask(cp);
 
+		/*
+		 * If the effective cpumask of any non-empty cpuset is changed,
+		 * we need to rebuild sched domains.
+		 */
+		if (!cpumask_empty(cp->cpus_allowed) &&
+		    is_sched_load_balance(cp))
+			need_rebuild_sched_domains = true;
+
 		rcu_read_lock();
 		css_put(&cp->css);
 	}
 	rcu_read_unlock();
+
+	if (need_rebuild_sched_domains)
+		rebuild_sched_domains_locked();
 }
 
 /**
@@ -889,7 +911,6 @@
 			  const char *buf)
 {
 	int retval;
-	int is_load_balanced;
 
 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 	if (cs == &top_cpuset)
@@ -908,7 +929,8 @@
 		if (retval < 0)
 			return retval;
 
-		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
+		if (!cpumask_subset(trialcs->cpus_allowed,
+				    top_cpuset.cpus_allowed))
 			return -EINVAL;
 	}
 
@@ -920,16 +942,12 @@
 	if (retval < 0)
 		return retval;
 
-	is_load_balanced = is_sched_load_balance(trialcs);
-
 	mutex_lock(&callback_mutex);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_cpumask_hier(cs, true);
-
-	if (is_load_balanced)
-		rebuild_sched_domains_locked();
+	/* use trialcs->cpus_allowed as a temp variable */
+	update_cpumasks_hier(cs, trialcs->cpus_allowed);
 	return 0;
 }
 
@@ -951,15 +969,13 @@
 							const nodemask_t *to)
 {
 	struct task_struct *tsk = current;
-	struct cpuset *mems_cs;
 
 	tsk->mems_allowed = *to;
 
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
 	rcu_read_lock();
-	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
-	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
 	rcu_read_unlock();
 }
 
@@ -1028,13 +1044,12 @@
 static void update_tasks_nodemask(struct cpuset *cs)
 {
 	static nodemask_t newmems;	/* protected by cpuset_mutex */
-	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
 	struct css_task_iter it;
 	struct task_struct *task;
 
 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
-	guarantee_online_mems(mems_cs, &newmems);
+	guarantee_online_mems(cs, &newmems);
 
 	/*
 	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
@@ -1077,36 +1092,52 @@
 }
 
 /*
- * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
- * @cs: the root cpuset of the hierarchy
- * @update_root: update the root cpuset or not?
+ * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
+ * @cs: the cpuset to consider
+ * @new_mems: a temp variable for calculating new effective_mems
  *
- * This will update nodemasks of tasks in @root_cs and all other empty cpusets
- * which take on nodemask of @root_cs.
+ * When the configured nodemask is changed, the effective nodemasks of this
+ * cpuset and all its descendants need to be updated.
+ *
+ * On legacy hierarchy, effective_mems will be the same as mems_allowed.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
+static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
 
 	rcu_read_lock();
-	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs) {
-			if (!update_root)
-				continue;
-		} else {
-			/* skip the whole subtree if @cp have some CPU */
-			if (!nodes_empty(cp->mems_allowed)) {
-				pos_css = css_rightmost_descendant(pos_css);
-				continue;
-			}
+	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+		struct cpuset *parent = parent_cs(cp);
+
+		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
+
+		/*
+		 * If it becomes empty, inherit the effective mask of the
+		 * parent, which is guaranteed to have some MEMs.
+		 */
+		if (nodes_empty(*new_mems))
+			*new_mems = parent->effective_mems;
+
+		/* Skip the whole subtree if the nodemask remains the same. */
+		if (nodes_equal(*new_mems, cp->effective_mems)) {
+			pos_css = css_rightmost_descendant(pos_css);
+			continue;
 		}
+
 		if (!css_tryget_online(&cp->css))
 			continue;
 		rcu_read_unlock();
 
+		mutex_lock(&callback_mutex);
+		cp->effective_mems = *new_mems;
+		mutex_unlock(&callback_mutex);
+
+		WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+			!nodes_equal(cp->mems_allowed, cp->effective_mems));
+
 		update_tasks_nodemask(cp);
 
 		rcu_read_lock();
@@ -1156,8 +1187,8 @@
 			goto done;
 
 		if (!nodes_subset(trialcs->mems_allowed,
-				node_states[N_MEMORY])) {
-			retval =  -EINVAL;
+				  top_cpuset.mems_allowed)) {
+			retval = -EINVAL;
 			goto done;
 		}
 	}
@@ -1174,7 +1205,8 @@
 	cs->mems_allowed = trialcs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_nodemask_hier(cs, true);
+	/* use trialcs->mems_allowed as a temp variable */
+	update_nodemasks_hier(cs, &cs->mems_allowed);
 done:
 	return retval;
 }
@@ -1389,12 +1421,9 @@
 
 	mutex_lock(&cpuset_mutex);
 
-	/*
-	 * We allow to move tasks into an empty cpuset if sane_behavior
-	 * flag is set.
-	 */
+	/* allow moving tasks into an empty cpuset if on default hierarchy */
 	ret = -ENOSPC;
-	if (!cgroup_sane_behavior(css->cgroup) &&
+	if (!cgroup_on_dfl(css->cgroup) &&
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
@@ -1452,8 +1481,6 @@
 	struct task_struct *leader = cgroup_taskset_first(tset);
 	struct cpuset *cs = css_cs(css);
 	struct cpuset *oldcs = cpuset_attach_old_cs;
-	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
-	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
 
 	mutex_lock(&cpuset_mutex);
 
@@ -1461,9 +1488,9 @@
 	if (cs == &top_cpuset)
 		cpumask_copy(cpus_attach, cpu_possible_mask);
 	else
-		guarantee_online_cpus(cpus_cs, cpus_attach);
+		guarantee_online_cpus(cs, cpus_attach);
 
-	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
+	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
 	cgroup_taskset_for_each(task, tset) {
 		/*
@@ -1480,11 +1507,9 @@
 	 * Change mm, possibly for multiple threads in a threadgroup. This is
 	 * expensive and may sleep.
 	 */
-	cpuset_attach_nodemask_to = cs->mems_allowed;
+	cpuset_attach_nodemask_to = cs->effective_mems;
 	mm = get_task_mm(leader);
 	if (mm) {
-		struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);
-
 		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
 
 		/*
@@ -1495,7 +1520,7 @@
 		 * mm from.
 		 */
 		if (is_memory_migrate(cs)) {
-			cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
+			cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
 					  &cpuset_attach_nodemask_to);
 		}
 		mmput(mm);
@@ -1516,6 +1541,8 @@
 	FILE_MEMORY_MIGRATE,
 	FILE_CPULIST,
 	FILE_MEMLIST,
+	FILE_EFFECTIVE_CPULIST,
+	FILE_EFFECTIVE_MEMLIST,
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
 	FILE_MEM_HARDWALL,
@@ -1694,6 +1721,12 @@
 	case FILE_MEMLIST:
 		s += nodelist_scnprintf(s, count, cs->mems_allowed);
 		break;
+	case FILE_EFFECTIVE_CPULIST:
+		s += cpulist_scnprintf(s, count, cs->effective_cpus);
+		break;
+	case FILE_EFFECTIVE_MEMLIST:
+		s += nodelist_scnprintf(s, count, cs->effective_mems);
+		break;
 	default:
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1779,6 +1812,18 @@
 	},
 
 	{
+		.name = "effective_cpus",
+		.seq_show = cpuset_common_seq_show,
+		.private = FILE_EFFECTIVE_CPULIST,
+	},
+
+	{
+		.name = "effective_mems",
+		.seq_show = cpuset_common_seq_show,
+		.private = FILE_EFFECTIVE_MEMLIST,
+	},
+
+	{
 		.name = "cpu_exclusive",
 		.read_u64 = cpuset_read_u64,
 		.write_u64 = cpuset_write_u64,
@@ -1869,18 +1914,26 @@
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
-	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
-		kfree(cs);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
+		goto free_cs;
+	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
+		goto free_cpus;
 
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	cpumask_clear(cs->cpus_allowed);
 	nodes_clear(cs->mems_allowed);
+	cpumask_clear(cs->effective_cpus);
+	nodes_clear(cs->effective_mems);
 	fmeter_init(&cs->fmeter);
 	cs->relax_domain_level = -1;
 
 	return &cs->css;
+
+free_cpus:
+	free_cpumask_var(cs->cpus_allowed);
+free_cs:
+	kfree(cs);
+	return ERR_PTR(-ENOMEM);
 }
 
 static int cpuset_css_online(struct cgroup_subsys_state *css)
@@ -1903,6 +1956,13 @@
 
 	cpuset_inc();
 
+	mutex_lock(&callback_mutex);
+	if (cgroup_on_dfl(cs->css.cgroup)) {
+		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
+		cs->effective_mems = parent->effective_mems;
+	}
+	mutex_unlock(&callback_mutex);
+
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
 
@@ -1962,20 +2022,40 @@
 {
 	struct cpuset *cs = css_cs(css);
 
+	free_cpumask_var(cs->effective_cpus);
 	free_cpumask_var(cs->cpus_allowed);
 	kfree(cs);
 }
 
+static void cpuset_bind(struct cgroup_subsys_state *root_css)
+{
+	mutex_lock(&cpuset_mutex);
+	mutex_lock(&callback_mutex);
+
+	if (cgroup_on_dfl(root_css->cgroup)) {
+		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
+		top_cpuset.mems_allowed = node_possible_map;
+	} else {
+		cpumask_copy(top_cpuset.cpus_allowed,
+			     top_cpuset.effective_cpus);
+		top_cpuset.mems_allowed = top_cpuset.effective_mems;
+	}
+
+	mutex_unlock(&callback_mutex);
+	mutex_unlock(&cpuset_mutex);
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
-	.css_alloc = cpuset_css_alloc,
-	.css_online = cpuset_css_online,
-	.css_offline = cpuset_css_offline,
-	.css_free = cpuset_css_free,
-	.can_attach = cpuset_can_attach,
-	.cancel_attach = cpuset_cancel_attach,
-	.attach = cpuset_attach,
-	.base_cftypes = files,
-	.early_init = 1,
+	.css_alloc	= cpuset_css_alloc,
+	.css_online	= cpuset_css_online,
+	.css_offline	= cpuset_css_offline,
+	.css_free	= cpuset_css_free,
+	.can_attach	= cpuset_can_attach,
+	.cancel_attach	= cpuset_cancel_attach,
+	.attach		= cpuset_attach,
+	.bind		= cpuset_bind,
+	.legacy_cftypes	= files,
+	.early_init	= 1,
 };
 
 /**
@@ -1990,9 +2070,13 @@
 
 	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
 		BUG();
+	if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
+		BUG();
 
 	cpumask_setall(top_cpuset.cpus_allowed);
 	nodes_setall(top_cpuset.mems_allowed);
+	cpumask_setall(top_cpuset.effective_cpus);
+	nodes_setall(top_cpuset.effective_mems);
 
 	fmeter_init(&top_cpuset.fmeter);
 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
@@ -2035,6 +2119,66 @@
 	}
 }
 
+static void
+hotplug_update_tasks_legacy(struct cpuset *cs,
+			    struct cpumask *new_cpus, nodemask_t *new_mems,
+			    bool cpus_updated, bool mems_updated)
+{
+	bool is_empty;
+
+	mutex_lock(&callback_mutex);
+	cpumask_copy(cs->cpus_allowed, new_cpus);
+	cpumask_copy(cs->effective_cpus, new_cpus);
+	cs->mems_allowed = *new_mems;
+	cs->effective_mems = *new_mems;
+	mutex_unlock(&callback_mutex);
+
+	/*
+	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
+	 * as the tasks will be migrated to an ancestor.
+	 */
+	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
+		update_tasks_cpumask(cs);
+	if (mems_updated && !nodes_empty(cs->mems_allowed))
+		update_tasks_nodemask(cs);
+
+	is_empty = cpumask_empty(cs->cpus_allowed) ||
+		   nodes_empty(cs->mems_allowed);
+
+	mutex_unlock(&cpuset_mutex);
+
+	/*
+	 * Move tasks to the nearest ancestor with execution resources.
+	 * This is a full cgroup operation which will also call back into
+	 * cpuset and should be done outside any lock.
+	 */
+	if (is_empty)
+		remove_tasks_in_empty_cpuset(cs);
+
+	mutex_lock(&cpuset_mutex);
+}
+
+static void
+hotplug_update_tasks(struct cpuset *cs,
+		     struct cpumask *new_cpus, nodemask_t *new_mems,
+		     bool cpus_updated, bool mems_updated)
+{
+	if (cpumask_empty(new_cpus))
+		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
+	if (nodes_empty(*new_mems))
+		*new_mems = parent_cs(cs)->effective_mems;
+
+	mutex_lock(&callback_mutex);
+	cpumask_copy(cs->effective_cpus, new_cpus);
+	cs->effective_mems = *new_mems;
+	mutex_unlock(&callback_mutex);
+
+	if (cpus_updated)
+		update_tasks_cpumask(cs);
+	if (mems_updated)
+		update_tasks_nodemask(cs);
+}
+
 /**
  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
  * @cs: cpuset in interest
@@ -2045,11 +2189,10 @@
  */
 static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 {
-	static cpumask_t off_cpus;
-	static nodemask_t off_mems;
-	bool is_empty;
-	bool sane = cgroup_sane_behavior(cs->css.cgroup);
-
+	static cpumask_t new_cpus;
+	static nodemask_t new_mems;
+	bool cpus_updated;
+	bool mems_updated;
 retry:
 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
 
@@ -2064,51 +2207,20 @@
 		goto retry;
 	}
 
-	cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
-	nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
+	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
-	mutex_lock(&callback_mutex);
-	cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
-	mutex_unlock(&callback_mutex);
+	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
+	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
 
-	/*
-	 * If sane_behavior flag is set, we need to update tasks' cpumask
-	 * for empty cpuset to take on ancestor's cpumask. Otherwise, don't
-	 * call update_tasks_cpumask() if the cpuset becomes empty, as
-	 * the tasks in it will be migrated to an ancestor.
-	 */
-	if ((sane && cpumask_empty(cs->cpus_allowed)) ||
-	    (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
-		update_tasks_cpumask(cs);
-
-	mutex_lock(&callback_mutex);
-	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
-	mutex_unlock(&callback_mutex);
-
-	/*
-	 * If sane_behavior flag is set, we need to update tasks' nodemask
-	 * for empty cpuset to take on ancestor's nodemask. Otherwise, don't
-	 * call update_tasks_nodemask() if the cpuset becomes empty, as
-	 * the tasks in it will be migratd to an ancestor.
-	 */
-	if ((sane && nodes_empty(cs->mems_allowed)) ||
-	    (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
-		update_tasks_nodemask(cs);
-
-	is_empty = cpumask_empty(cs->cpus_allowed) ||
-		nodes_empty(cs->mems_allowed);
+	if (cgroup_on_dfl(cs->css.cgroup))
+		hotplug_update_tasks(cs, &new_cpus, &new_mems,
+				     cpus_updated, mems_updated);
+	else
+		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
+					    cpus_updated, mems_updated);
 
 	mutex_unlock(&cpuset_mutex);
-
-	/*
-	 * If sane_behavior flag is set, we'll keep tasks in empty cpusets.
-	 *
-	 * Otherwise move tasks to the nearest ancestor with execution
-	 * resources.  This is full cgroup operation which will
-	 * also call back into cpuset.  Should be done outside any lock.
-	 */
-	if (!sane && is_empty)
-		remove_tasks_in_empty_cpuset(cs);
 }
 
 /**
@@ -2132,6 +2244,7 @@
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
 	bool cpus_updated, mems_updated;
+	bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup);
 
 	mutex_lock(&cpuset_mutex);
 
@@ -2139,13 +2252,15 @@
 	cpumask_copy(&new_cpus, cpu_active_mask);
 	new_mems = node_states[N_MEMORY];
 
-	cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
-	mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
+	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
+	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
 		mutex_lock(&callback_mutex);
-		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+		if (!on_dfl)
+			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
 		mutex_unlock(&callback_mutex);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
@@ -2153,7 +2268,9 @@
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
 		mutex_lock(&callback_mutex);
-		top_cpuset.mems_allowed = new_mems;
+		if (!on_dfl)
+			top_cpuset.mems_allowed = new_mems;
+		top_cpuset.effective_mems = new_mems;
 		mutex_unlock(&callback_mutex);
 		update_tasks_nodemask(&top_cpuset);
 	}
@@ -2228,6 +2345,9 @@
 	top_cpuset.mems_allowed = node_states[N_MEMORY];
 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
 
+	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
+	top_cpuset.effective_mems = node_states[N_MEMORY];
+
 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
 }
 
@@ -2244,23 +2364,17 @@
 
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
-	struct cpuset *cpus_cs;
-
 	mutex_lock(&callback_mutex);
 	rcu_read_lock();
-	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
-	guarantee_online_cpus(cpus_cs, pmask);
+	guarantee_online_cpus(task_cs(tsk), pmask);
 	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 }
 
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
-	struct cpuset *cpus_cs;
-
 	rcu_read_lock();
-	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
-	do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
+	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
 	rcu_read_unlock();
 
 	/*
@@ -2299,13 +2413,11 @@
 
 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 {
-	struct cpuset *mems_cs;
 	nodemask_t mask;
 
 	mutex_lock(&callback_mutex);
 	rcu_read_lock();
-	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
-	guarantee_online_mems(mems_cs, &mask);
+	guarantee_online_mems(task_cs(tsk), &mask);
 	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6b17ac1..1cf24b3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5266,6 +5266,12 @@
 
 		goto got_name;
 	} else {
+		if (vma->vm_ops && vma->vm_ops->name) {
+			name = (char *) vma->vm_ops->name(vma);
+			if (name)
+				goto cpy_name;
+		}
+
 		name = (char *)arch_vma_name(vma);
 		if (name)
 			goto cpy_name;
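
With vm_ops->name consulted first, a mapping can label itself in
PERF_RECORD_MMAP events before perf falls back to arch_vma_name() and the
file path. A minimal sketch of a special mapping using the hook
(hypothetical driver code, not part of this patch):

	static const char *foo_vma_name(struct vm_area_struct *vma)
	{
		return "[foo]";		/* reported in PERF_RECORD_MMAP */
	}

	static const struct vm_operations_struct foo_vm_ops = {
		.name	= foo_vma_name,
	};
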
@@ -7804,7 +7810,7 @@
 /*
  * Initialize the perf_event context in task_struct
  */
-int perf_event_init_context(struct task_struct *child, int ctxn)
+static int perf_event_init_context(struct task_struct *child, int ctxn)
 {
 	struct perf_event_context *child_ctx, *parent_ctx;
 	struct perf_event_context *cloned_ctx;
diff --git a/kernel/futex.c b/kernel/futex.c
index b632b5f..d3a9d94 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -792,94 +792,91 @@
  * [10] There is no transient state which leaves owner and user space
  *	TID out of sync.
  */
-static int
-lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
-		union futex_key *key, struct futex_pi_state **ps)
+
+/*
+ * Validate that the existing waiter has a pi_state and sanity check
+ * the pi_state against the user space value. If correct, attach to
+ * it.
+ */
+static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+			      struct futex_pi_state **ps)
 {
-	struct futex_pi_state *pi_state = NULL;
-	struct futex_q *this, *next;
-	struct task_struct *p;
 	pid_t pid = uval & FUTEX_TID_MASK;
 
-	plist_for_each_entry_safe(this, next, &hb->chain, list) {
-		if (match_futex(&this->key, key)) {
+	/*
+	 * Userspace might have messed up non-PI and PI futexes [3]
+	 */
+	if (unlikely(!pi_state))
+		return -EINVAL;
+
+	WARN_ON(!atomic_read(&pi_state->refcount));
+
+	/*
+	 * Handle the owner died case:
+	 */
+	if (uval & FUTEX_OWNER_DIED) {
+		/*
+		 * exit_pi_state_list sets owner to NULL and wakes the
+		 * topmost waiter. The task which acquires the
+		 * pi_state->rt_mutex will fixup owner.
+		 */
+		if (!pi_state->owner) {
 			/*
-			 * Sanity check the waiter before increasing
-			 * the refcount and attaching to it.
+			 * No pi state owner, but the user space TID
+			 * is not 0. Inconsistent state. [5]
 			 */
-			pi_state = this->pi_state;
-			/*
-			 * Userspace might have messed up non-PI and
-			 * PI futexes [3]
-			 */
-			if (unlikely(!pi_state))
+			if (pid)
 				return -EINVAL;
-
-			WARN_ON(!atomic_read(&pi_state->refcount));
-
 			/*
-			 * Handle the owner died case:
+			 * Take a ref on the state and return success. [4]
 			 */
-			if (uval & FUTEX_OWNER_DIED) {
-				/*
-				 * exit_pi_state_list sets owner to NULL and
-				 * wakes the topmost waiter. The task which
-				 * acquires the pi_state->rt_mutex will fixup
-				 * owner.
-				 */
-				if (!pi_state->owner) {
-					/*
-					 * No pi state owner, but the user
-					 * space TID is not 0. Inconsistent
-					 * state. [5]
-					 */
-					if (pid)
-						return -EINVAL;
-					/*
-					 * Take a ref on the state and
-					 * return. [4]
-					 */
-					goto out_state;
-				}
-
-				/*
-				 * If TID is 0, then either the dying owner
-				 * has not yet executed exit_pi_state_list()
-				 * or some waiter acquired the rtmutex in the
-				 * pi state, but did not yet fixup the TID in
-				 * user space.
-				 *
-				 * Take a ref on the state and return. [6]
-				 */
-				if (!pid)
-					goto out_state;
-			} else {
-				/*
-				 * If the owner died bit is not set,
-				 * then the pi_state must have an
-				 * owner. [7]
-				 */
-				if (!pi_state->owner)
-					return -EINVAL;
-			}
-
-			/*
-			 * Bail out if user space manipulated the
-			 * futex value. If pi state exists then the
-			 * owner TID must be the same as the user
-			 * space TID. [9/10]
-			 */
-			if (pid != task_pid_vnr(pi_state->owner))
-				return -EINVAL;
-
-		out_state:
-			atomic_inc(&pi_state->refcount);
-			*ps = pi_state;
-			return 0;
+			goto out_state;
 		}
+
+		/*
+		 * If TID is 0, then either the dying owner has not
+		 * yet executed exit_pi_state_list() or some waiter
+		 * acquired the rtmutex in the pi state, but did not
+		 * yet fixup the TID in user space.
+		 *
+		 * Take a ref on the state and return success. [6]
+		 */
+		if (!pid)
+			goto out_state;
+	} else {
+		/*
+		 * If the owner died bit is not set, then the pi_state
+		 * must have an owner. [7]
+		 */
+		if (!pi_state->owner)
+			return -EINVAL;
 	}
 
 	/*
+	 * Bail out if user space manipulated the futex value. If pi
+	 * state exists then the owner TID must be the same as the
+	 * user space TID. [9/10]
+	 */
+	if (pid != task_pid_vnr(pi_state->owner))
+		return -EINVAL;
+out_state:
+	atomic_inc(&pi_state->refcount);
+	*ps = pi_state;
+	return 0;
+}
+
+/*
+ * Lookup the task for the TID provided from user space and attach to
+ * it after doing proper sanity checks.
+ */
+static int attach_to_pi_owner(u32 uval, union futex_key *key,
+			      struct futex_pi_state **ps)
+{
+	pid_t pid = uval & FUTEX_TID_MASK;
+	struct futex_pi_state *pi_state;
+	struct task_struct *p;
+
+	/*
 	 * We are the first waiter - try to look up the real owner and attach
 	 * the new pi_state to it, but bail out when TID = 0 [1]
 	 */
@@ -920,7 +917,7 @@
 	pi_state = alloc_pi_state();
 
 	/*
-	 * Initialize the pi_mutex in locked state and make 'p'
+	 * Initialize the pi_mutex in locked state and make @p
 	 * the owner of it:
 	 */
 	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
@@ -940,6 +937,36 @@
 	return 0;
 }
 
+static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+			   union futex_key *key, struct futex_pi_state **ps)
+{
+	struct futex_q *match = futex_top_waiter(hb, key);
+
+	/*
+	 * If there is a waiter on that futex, validate it and
+	 * attach to the pi_state when the validation succeeds.
+	 */
+	if (match)
+		return attach_to_pi_state(uval, match->pi_state, ps);
+
+	/*
+	 * We are the first waiter - try to look up the owner based on
+	 * @uval and attach to it.
+	 */
+	return attach_to_pi_owner(uval, key, ps);
+}
+
+static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+{
+	u32 uninitialized_var(curval);
+
+	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
+		return -EFAULT;
+
+	/* If user space value changed, let the caller retry */
+	return curval != uval ? -EAGAIN : 0;
+}
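
For reference, the user space futex word that these attach helpers validate
packs the owner TID and two state bits (from include/uapi/linux/futex.h):

	#define FUTEX_WAITERS		0x80000000	/* kernel has waiters queued */
	#define FUTEX_OWNER_DIED	0x40000000	/* robust owner exited */
	#define FUTEX_TID_MASK		0x3fffffff	/* TID of the user space owner */
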
+
 /**
  * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
  * @uaddr:		the pi futex user address
@@ -963,113 +990,69 @@
 				struct futex_pi_state **ps,
 				struct task_struct *task, int set_waiters)
 {
-	int lock_taken, ret, force_take = 0;
-	u32 uval, newval, curval, vpid = task_pid_vnr(task);
-
-retry:
-	ret = lock_taken = 0;
+	u32 uval, newval, vpid = task_pid_vnr(task);
+	struct futex_q *match;
+	int ret;
 
 	/*
-	 * To avoid races, we attempt to take the lock here again
-	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
-	 * the locks. It will most likely not succeed.
+	 * Read the user space value first so we can validate a few
+	 * things before proceeding further.
 	 */
-	newval = vpid;
-	if (set_waiters)
-		newval |= FUTEX_WAITERS;
-
-	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
+	if (get_futex_value_locked(&uval, uaddr))
 		return -EFAULT;
 
 	/*
 	 * Detect deadlocks.
 	 */
-	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
+	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
 		return -EDEADLK;
 
 	/*
-	 * Surprise - we got the lock, but we do not trust user space at all.
+	 * Lookup existing state first. If it exists, try to attach to
+	 * its pi_state.
 	 */
-	if (unlikely(!curval)) {
+	match = futex_top_waiter(hb, key);
+	if (match)
+		return attach_to_pi_state(uval, match->pi_state, ps);
+
+	/*
+	 * No waiter and user TID is 0. We are here because the
+	 * waiters or the owner died bit is set or called from
+	 * requeue_cmp_pi or for whatever reason something took the
+	 * syscall.
+	 */
+	if (!(uval & FUTEX_TID_MASK)) {
 		/*
-		 * We verify whether there is kernel state for this
-		 * futex. If not, we can safely assume, that the 0 ->
-		 * TID transition is correct. If state exists, we do
-		 * not bother to fixup the user space state as it was
-		 * corrupted already.
+		 * We take over the futex. No other waiters and the user space
+		 * TID is 0. We preserve the owner died bit.
 		 */
-		return futex_top_waiter(hb, key) ? -EINVAL : 1;
+		newval = uval & FUTEX_OWNER_DIED;
+		newval |= vpid;
+
+		/* The futex requeue_pi code can enforce the waiters bit */
+		if (set_waiters)
+			newval |= FUTEX_WAITERS;
+
+		ret = lock_pi_update_atomic(uaddr, uval, newval);
+		/* If the take over worked, return 1 */
+		return ret < 0 ? ret : 1;
 	}
 
-	uval = curval;
-
 	/*
-	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
-	 * to wake at the next unlock.
+	 * First waiter. Set the waiters bit before attaching ourselves to
+	 * the owner. If owner tries to unlock, it will be forced into
+	 * the kernel and blocked on hb->lock.
 	 */
-	newval = curval | FUTEX_WAITERS;
-
+	newval = uval | FUTEX_WAITERS;
+	ret = lock_pi_update_atomic(uaddr, uval, newval);
+	if (ret)
+		return ret;
 	/*
-	 * Should we force take the futex? See below.
+	 * If the update of the user space value succeeded, we try to
+	 * attach to the owner. If that fails, no harm done, we only
+	 * set the FUTEX_WAITERS bit in the user space variable.
 	 */
-	if (unlikely(force_take)) {
-		/*
-		 * Keep the OWNER_DIED and the WAITERS bit and set the
-		 * new TID value.
-		 */
-		newval = (curval & ~FUTEX_TID_MASK) | vpid;
-		force_take = 0;
-		lock_taken = 1;
-	}
-
-	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
-		return -EFAULT;
-	if (unlikely(curval != uval))
-		goto retry;
-
-	/*
-	 * We took the lock due to forced take over.
-	 */
-	if (unlikely(lock_taken))
-		return 1;
-
-	/*
-	 * We dont have the lock. Look up the PI state (or create it if
-	 * we are the first waiter):
-	 */
-	ret = lookup_pi_state(uval, hb, key, ps);
-
-	if (unlikely(ret)) {
-		switch (ret) {
-		case -ESRCH:
-			/*
-			 * We failed to find an owner for this
-			 * futex. So we have no pi_state to block
-			 * on. This can happen in two cases:
-			 *
-			 * 1) The owner died
-			 * 2) A stale FUTEX_WAITERS bit
-			 *
-			 * Re-read the futex value.
-			 */
-			if (get_futex_value_locked(&curval, uaddr))
-				return -EFAULT;
-
-			/*
-			 * If the owner died or we have a stale
-			 * WAITERS bit the owner TID in the user space
-			 * futex is 0.
-			 */
-			if (!(curval & FUTEX_TID_MASK)) {
-				force_take = 1;
-				goto retry;
-			}
-		default:
-			break;
-		}
-	}
-
-	return ret;
+	return attach_to_pi_owner(uval, key, ps);
 }
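
The rewritten function has exactly three outcomes: attach to an existing
waiter's pi_state, take over a futex whose TID field is 0, or set
FUTEX_WAITERS and attach to the owner. It backs up the usual user space
fast path, sketched here in pseudo-libc form (illustrative, not glibc):

	uint32_t tid = syscall(SYS_gettid);

	/* lock: try 0 -> TID in user space, else let the kernel sort it out */
	if (__sync_val_compare_and_swap(&futex_word, 0, tid) != 0)
		syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);

	/* unlock: try TID -> 0; a set WAITERS/OWNER_DIED bit forces the syscall */
	if (__sync_val_compare_and_swap(&futex_word, tid, 0) != tid)
		syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
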
 
 /**
@@ -1186,22 +1169,6 @@
 	return 0;
 }
 
-static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
-{
-	u32 uninitialized_var(oldval);
-
-	/*
-	 * There is no waiter, so we unlock the futex. The owner died
-	 * bit has not to be preserved here. We are the owner:
-	 */
-	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
-		return -EFAULT;
-	if (oldval != uval)
-		return -EAGAIN;
-
-	return 0;
-}
-
 /*
  * Express the locking dependencies for lockdep:
  */
@@ -1659,7 +1626,12 @@
 				goto retry;
 			goto out;
 		case -EAGAIN:
-			/* The owner was exiting, try again. */
+			/*
+			 * Two reasons for this:
+			 * - Owner is exiting and we just wait for the
+			 *   exit to complete.
+			 * - The user space value changed.
+			 */
 			double_unlock_hb(hb1, hb2);
 			hb_waiters_dec(hb2);
 			put_futex_key(&key2);
@@ -1718,7 +1690,7 @@
 			this->pi_state = pi_state;
 			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
 							this->rt_waiter,
-							this->task, 1);
+							this->task);
 			if (ret == 1) {
 				/* We got the lock. */
 				requeue_pi_wake_futex(this, &key2, hb2);
@@ -2316,8 +2288,10 @@
 			goto uaddr_faulted;
 		case -EAGAIN:
 			/*
-			 * Task is exiting and we just wait for the
-			 * exit to complete.
+			 * Two reasons for this:
+			 * - Task is exiting and we just wait for the
+			 *   exit to complete.
+			 * - The user space value changed.
 			 */
 			queue_unlock(hb);
 			put_futex_key(&q.key);
@@ -2337,9 +2311,9 @@
 	/*
 	 * Block on the PI mutex:
 	 */
-	if (!trylock)
-		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
-	else {
+	if (!trylock) {
+		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
+	} else {
 		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
 		/* Fixup the trylock return value: */
 		ret = ret ? 0 : -EWOULDBLOCK;
@@ -2401,10 +2375,10 @@
  */
 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 {
-	struct futex_hash_bucket *hb;
-	struct futex_q *this, *next;
+	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
 	union futex_key key = FUTEX_KEY_INIT;
-	u32 uval, vpid = task_pid_vnr(current);
+	struct futex_hash_bucket *hb;
+	struct futex_q *match;
 	int ret;
 
 retry:
@@ -2417,57 +2391,47 @@
 		return -EPERM;
 
 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
-	if (unlikely(ret != 0))
-		goto out;
+	if (ret)
+		return ret;
 
 	hb = hash_futex(&key);
 	spin_lock(&hb->lock);
 
 	/*
-	 * To avoid races, try to do the TID -> 0 atomic transition
-	 * again. If it succeeds then we can return without waking
-	 * anyone else up. We only try this if neither the waiters nor
-	 * the owner died bit are set.
+	 * Check waiters first. We do not trust user space values at
+	 * all and we at least want to know if user space fiddled
+	 * with the futex value instead of blindly unlocking.
 	 */
-	if (!(uval & ~FUTEX_TID_MASK) &&
-	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
-		goto pi_faulted;
-	/*
-	 * Rare case: we managed to release the lock atomically,
-	 * no need to wake anyone else up:
-	 */
-	if (unlikely(uval == vpid))
-		goto out_unlock;
-
-	/*
-	 * Ok, other tasks may need to be woken up - check waiters
-	 * and do the wakeup if necessary:
-	 */
-	plist_for_each_entry_safe(this, next, &hb->chain, list) {
-		if (!match_futex (&this->key, &key))
-			continue;
-		ret = wake_futex_pi(uaddr, uval, this);
+	match = futex_top_waiter(hb, &key);
+	if (match) {
+		ret = wake_futex_pi(uaddr, uval, match);
 		/*
-		 * The atomic access to the futex value
-		 * generated a pagefault, so retry the
-		 * user-access and the wakeup:
+		 * The atomic access to the futex value generated a
+		 * pagefault, so retry the user-access and the wakeup:
 		 */
 		if (ret == -EFAULT)
 			goto pi_faulted;
 		goto out_unlock;
 	}
+
 	/*
-	 * No waiters - kernel unlocks the futex:
+	 * We have no kernel internal state, i.e. no waiters in the
+	 * kernel. Waiters which are about to queue themselves are stuck
+	 * on hb->lock. So we can safely ignore them. We do neither
+	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
+	 * owner.
 	 */
-	ret = unlock_futex_pi(uaddr, uval);
-	if (ret == -EFAULT)
+	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
 		goto pi_faulted;
 
+	/*
+	 * If uval has changed, let user space handle it.
+	 */
+	ret = (curval == uval) ? 0 : -EAGAIN;
+
 out_unlock:
 	spin_unlock(&hb->lock);
 	put_futex_key(&key);
-
-out:
 	return ret;
 
 pi_faulted:
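
-EAGAIN still reaches user space when the futex word changed under the
kernel; what is new is that the kernel checks its own waiter state first
instead of trusting the user space value. A hedged sketch of the
caller-side reaction (assuming the raw syscall interface):

	/* retry FUTEX_UNLOCK_PI if user space raced with the kernel */
	while (syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI,
		       0, NULL, NULL, 0) == -1 && errno == EAGAIN)
		;	/* the futex word changed under the kernel; try again */
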
@@ -2669,7 +2633,7 @@
 		 */
 		WARN_ON(!q.pi_state);
 		pi_mutex = &q.pi_state->pi_mutex;
-		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
 		debug_rt_mutex_free_waiter(&rt_waiter);
 
 		spin_lock(q.lock_ptr);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a..4b8f0c9 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
 #include <linux/swap.h>
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
+#include <linux/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1619,6 +1620,9 @@
 #endif
 	VMCOREINFO_NUMBER(PG_head_mask);
 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+#ifdef CONFIG_HUGETLBFS
+	VMCOREINFO_SYMBOL(free_huge_page);
+#endif
 
 	arch_crash_save_vmcoreinfo();
 	update_vmcoreinfo_note();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c2390f4..ef48322 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -591,7 +591,7 @@
 
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
-	if (likely(worker->task))
+	if (!worker->current_work && likely(worker->task))
 		wake_up_process(worker->task);
 }
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d24e433..88d0d44 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -384,7 +384,9 @@
 {
 	printk(KERN_DEBUG "%s\n", bug_msg);
 	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
+#ifdef CONFIG_LOCK_STAT
 	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
+#endif
 }
 
 static int save_trace(struct stack_trace *trace)
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index be9ee15..9887a90 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -1,6 +1,4 @@
-
 #include <linux/percpu.h>
-#include <linux/mutex.h>
 #include <linux/sched.h>
 #include "mcs_spinlock.h"
 
@@ -79,7 +77,7 @@
 				break;
 		}
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	return next;
@@ -120,7 +118,7 @@
 		if (need_resched())
 			goto unqueue;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	return true;
 
@@ -146,7 +144,7 @@
 		if (smp_load_acquire(&node->locked))
 			return true;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 		/*
 		 * Or we race against a concurrent unqueue()'s step-B, in which
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 74356dc..23e89c5 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -27,7 +27,7 @@
 #define arch_mcs_spin_lock_contended(l)					\
 do {									\
 	while (!(smp_load_acquire(l)))					\
-		arch_mutex_cpu_relax();					\
+		cpu_relax_lowlatency();					\
 } while (0)
 #endif
 
@@ -104,7 +104,7 @@
 			return;
 		/* Wait until the next pointer is set */
 		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
+			cpu_relax_lowlatency();
 	}
 
 	/* Pass lock to next waiter. */
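
arch_mutex_cpu_relax() was a mutex-private hook; cpu_relax_lowlatency() is
its generic replacement for any tight spin-wait loop. On most architectures
it is plain cpu_relax(); s390 maps it to a cheap barrier because its
cpu_relax() yields to the hypervisor, which is far too heavy for short
spins. The canonical shape, as a sketch:

	/* busy-wait for a flag another CPU will set shortly */
	while (!ACCESS_ONCE(ready))
		cpu_relax_lowlatency();
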
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index acca2c1..ae712b2 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -46,12 +46,6 @@
 # include <asm/mutex.h>
 #endif
 
-/*
- * A negative mutex count indicates that waiters are sleeping waiting for the
- * mutex.
- */
-#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
-
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -152,7 +146,7 @@
 		if (need_resched())
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	rcu_read_unlock();
 
@@ -388,12 +382,10 @@
 	/*
 	 * Optimistic spinning.
 	 *
-	 * We try to spin for acquisition when we find that there are no
-	 * pending waiters and the lock owner is currently running on a
-	 * (different) CPU.
-	 *
-	 * The rationale is that if the lock owner is running, it is likely to
-	 * release the lock soon.
+	 * We try to spin for acquisition when we find that the lock owner
+	 * is currently running on a (different) CPU and while we don't
+	 * need to reschedule. The rationale is that if the lock owner is
+	 * running, it is likely to release the lock soon.
 	 *
 	 * Since this needs the lock owner, and this mutex implementation
 	 * doesn't track the owner atomically in the lock field, we need to
@@ -440,7 +432,8 @@
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
-		if ((atomic_read(&lock->count) == 1) &&
+		/* Try to acquire the mutex if it is unlocked. */
+		if (!mutex_is_locked(lock) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
 			if (use_ww_ctx) {
@@ -471,7 +464,7 @@
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	osq_unlock(&lock->osq);
 slowpath:
@@ -485,8 +478,11 @@
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	/* once more, can we acquire the lock? */
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+	/*
+	 * Once more, try to acquire the lock. Only try-lock the mutex if
+	 * it is unlocked to reduce unnecessary xchg() operations.
+	 */
+	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -506,9 +502,10 @@
 		 * it's unlocked. Later on, if we sleep, this is the
 		 * operation that gives us the lock. We xchg it to -1, so
 		 * that when we release the lock, we properly wake up the
-		 * other waiters:
+		 * other waiters. We only attempt the xchg if the count is
+		 * non-negative in order to avoid unnecessary xchg operations:
 		 */
-		if (MUTEX_SHOW_NO_WAITER(lock) &&
+		if (atomic_read(&lock->count) >= 0 &&
 		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
@@ -823,6 +820,10 @@
 	unsigned long flags;
 	int prev;
 
+	/* No need to trylock if the mutex is locked. */
+	if (mutex_is_locked(lock))
+		return 0;
+
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	prev = atomic_xchg(&lock->count, -1);
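
These checks lean on the mutex counter convention (1 unlocked, 0 locked
with no waiters, negative locked with possible waiters), so a cheap read
can filter out xchg()/cmpxchg() attempts that would only bounce the lock
cacheline around. For reference, from include/linux/mutex.h:

	static inline int mutex_is_locked(struct mutex *lock)
	{
		return atomic_read(&lock->count) != 1;
	}
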
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index fb5b8ac..f956ede 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,7 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/mutex.h>
 #include <asm/qrwlock.h>
 
 /**
@@ -35,7 +34,7 @@
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 		cnts = smp_load_acquire((u32 *)&lock->cnts);
 	}
 }
@@ -75,7 +74,7 @@
 	 * to make sure that the write lock isn't taken.
 	 */
 	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
@@ -114,7 +113,7 @@
 				    cnts | _QW_WAITING) == cnts))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -125,7 +124,7 @@
 				    _QW_LOCKED) == _QW_WAITING))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 unlock:
 	arch_spin_unlock(&lock->lock);
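
For reference, the masks used here decode the qrwlock count layout (from
include/asm-generic/qrwlock.h): the low byte holds the writer state and the
upper 24 bits the reader count:

	#define _QW_WAITING	1		/* a writer is waiting */
	#define _QW_LOCKED	0xff		/* a writer holds the lock */
	#define _QW_WMASK	0xff		/* writer byte mask */
	#define _QR_SHIFT	8		/* reader count shift */
	#define _QR_BIAS	(1U << _QR_SHIFT)
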
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 49b2ed3..62b6cee 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -66,12 +66,13 @@
  * the deadlock. We print when we return. act_waiter can be NULL in
  * case of a remove waiter operation.
  */
-void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+			     struct rt_mutex_waiter *act_waiter,
 			     struct rt_mutex *lock)
 {
 	struct task_struct *task;
 
-	if (!debug_locks || detect || !act_waiter)
+	if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
 		return;
 
 	task = rt_mutex_owner(act_waiter->lock);
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index ab29b6a..d0519c3 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -20,14 +20,15 @@
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
 				      struct task_struct *powner);
 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+				    struct rt_mutex_waiter *waiter,
 				    struct rt_mutex *lock);
 extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
 # define debug_rt_mutex_reset_waiter(w)			\
 	do { (w)->deadlock_lock = NULL; } while (0)
 
-static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
-						 int detect)
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+						  enum rtmutex_chainwalk walk)
 {
 	return (waiter != NULL);
 }
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index fc60594..a0ea2a1 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -308,6 +308,32 @@
 }
 
 /*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+					  enum rtmutex_chainwalk chwalk)
+{
+	/*
+	 * This is just a wrapper function for the following call,
+	 * because debug_rt_mutex_detect_deadlock() smells like a magic
+	 * debug feature and I wanted to keep the cond function in the
+	 * main source file along with the comments instead of having
+	 * two of the same in the headers.
+	 */
+	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
+/*
  * Max number of times we'll walk the boosting chain:
  */
 int max_lock_depth = 1024;
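
The three rules above condense to a small truth table. A hypothetical
helper (not in the patch) spelling the same thing out:

	static bool walk_detects_deadlock(bool debug_rt_mutexes,	/* CONFIG_DEBUG_RT_MUTEXES */
					  struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
	{
		if (!waiter)			/* deboost path: never detect */
			return false;
		if (debug_rt_mutexes)		/* debug build: always detect */
			return true;
		return chwalk == RT_MUTEX_FULL_CHAINWALK;
	}
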
@@ -337,21 +363,65 @@
  * @top_task:	the current top waiter
  *
  * Returns 0 or -EDEADLK.
+ *
+ * Chain walk basics and protection scope
+ *
+ * [R] refcount on task
+ * [P] task->pi_lock held
+ * [L] rtmutex->wait_lock held
+ *
+ * Step	Description				Protected by
+ *	function arguments:
+ *	@task					[R]
+ *	@orig_lock if != NULL			@top_task is blocked on it
+ *	@next_lock				Unprotected. Cannot be
+ *						dereferenced. Only used for
+ *						comparison.
+ *	@orig_waiter if != NULL			@top_task is blocked on it
+ *	@top_task				current, or in case of proxy
+ *						locking protected by calling
+ *						code
+ *	again:
+ *	  loop_sanity_check();
+ *	retry:
+ * [1]	  lock(task->pi_lock);			[R] acquire [P]
+ * [2]	  waiter = task->pi_blocked_on;		[P]
+ * [3]	  check_exit_conditions_1();		[P]
+ * [4]	  lock = waiter->lock;			[P]
+ * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
+ *	    unlock(task->pi_lock);		release [P]
+ *	    goto retry;
+ *	  }
+ * [6]	  check_exit_conditions_2();		[P] + [L]
+ * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
+ * [8]	  unlock(task->pi_lock);		release [P]
+ *	  put_task_struct(task);		release [R]
+ * [9]	  check_exit_conditions_3();		[L]
+ * [10]	  task = owner(lock);			[L]
+ *	  get_task_struct(task);		[L] acquire [R]
+ *	  lock(task->pi_lock);			[L] acquire [P]
+ * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
+ * [12]	  check_exit_conditions_4();		[P] + [L]
+ * [13]	  unlock(task->pi_lock);		release [P]
+ *	  unlock(lock->wait_lock);		release [L]
+ *	  goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-				      int deadlock_detect,
+				      enum rtmutex_chainwalk chwalk,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex *next_lock,
 				      struct rt_mutex_waiter *orig_waiter,
 				      struct task_struct *top_task)
 {
-	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
-	int detect_deadlock, ret = 0, depth = 0;
+	struct rt_mutex_waiter *prerequeue_top_waiter;
+	int ret = 0, depth = 0;
+	struct rt_mutex *lock;
+	bool detect_deadlock;
 	unsigned long flags;
+	bool requeue = true;
 
-	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-							 deadlock_detect);
+	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
 	/*
 	 * The (de)boosting is a step by step approach with a lot of
@@ -360,6 +430,9 @@
 	 * carefully whether things change under us.
 	 */
  again:
+	/*
+	 * We limit the lock chain length for each invocation.
+	 */
 	if (++depth > max_lock_depth) {
 		static int prev_max;
 
@@ -377,13 +450,28 @@
 
 		return -EDEADLK;
 	}
+
+	/*
+	 * We are fully preemptible here and only hold the refcount on
+	 * @task. So everything can have changed under us since the
+	 * caller or our own code below (goto retry/again) dropped all
+	 * locks.
+	 */
  retry:
 	/*
-	 * Task can not go away as we did a get_task() before !
+	 * [1] Task cannot go away as we did a get_task() before !
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+	/*
+	 * [2] Get the waiter on which @task is blocked on.
+	 */
 	waiter = task->pi_blocked_on;
+
+	/*
+	 * [3] check_exit_conditions_1() protected by task->pi_lock.
+	 */
+
 	/*
 	 * Check whether the end of the boosting chain has been
 	 * reached or the state of the chain has changed while we
@@ -421,20 +509,41 @@
 			goto out_unlock_pi;
 		/*
 		 * If deadlock detection is off, we stop here if we
-		 * are not the top pi waiter of the task.
+		 * are not the top pi waiter of the task. If deadlock
+		 * detection is enabled we continue, but stop the
+		 * requeueing in the chain walk.
 		 */
-		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
-			goto out_unlock_pi;
+		if (top_waiter != task_top_pi_waiter(task)) {
+			if (!detect_deadlock)
+				goto out_unlock_pi;
+			else
+				requeue = false;
+		}
 	}
 
 	/*
-	 * When deadlock detection is off then we check, if further
-	 * priority adjustment is necessary.
+	 * If the waiter priority is the same as the task priority
+	 * then there is no further priority adjustment necessary.  If
+	 * deadlock detection is off, we stop the chain walk. If it's
+	 * enabled we continue, but stop the requeueing in the chain
+	 * walk.
 	 */
-	if (!detect_deadlock && waiter->prio == task->prio)
-		goto out_unlock_pi;
+	if (waiter->prio == task->prio) {
+		if (!detect_deadlock)
+			goto out_unlock_pi;
+		else
+			requeue = false;
+	}
 
+	/*
+	 * [4] Get the next lock
+	 */
 	lock = waiter->lock;
+	/*
+	 * [5] We need to trylock here as we are holding task->pi_lock,
+	 * which is the reverse lock order versus the other rtmutex
+	 * operations.
+	 */
 	if (!raw_spin_trylock(&lock->wait_lock)) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		cpu_relax();
@@ -442,79 +551,180 @@
 	}
 
 	/*
+	 * [6] check_exit_conditions_2() protected by task->pi_lock and
+	 * lock->wait_lock.
+	 *
 	 * Deadlock detection. If the lock is the same as the original
 	 * lock which caused us to walk the lock chain or if the
 	 * current lock is owned by the task which initiated the chain
 	 * walk, we detected a deadlock.
 	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
 		ret = -EDEADLK;
 		goto out_unlock_pi;
 	}
 
-	top_waiter = rt_mutex_top_waiter(lock);
+	/*
+	 * If we just follow the lock chain for deadlock detection, no
+	 * need to do all the requeue operations. To avoid a truckload
+	 * of conditionals around the various places below, just do the
+	 * minimum chain walk checks.
+	 */
+	if (!requeue) {
+		/*
+		 * No requeue[7] here. Just release @task [8]
+		 */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		put_task_struct(task);
 
-	/* Requeue the waiter */
+		/*
+		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
+		 * If there is no owner of the lock, end of chain.
+		 */
+		if (!rt_mutex_owner(lock)) {
+			raw_spin_unlock(&lock->wait_lock);
+			return 0;
+		}
+
+		/* [10] Grab the next task, i.e. owner of @lock */
+		task = rt_mutex_owner(lock);
+		get_task_struct(task);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+		/*
+		 * No requeue [11] here. We just do deadlock detection.
+		 *
+		 * [12] Store whether owner is blocked
+		 * itself. Decision is made after dropping the locks
+		 */
+		next_lock = task_blocked_on_lock(task);
+		/*
+		 * Get the top waiter for the next iteration
+		 */
+		top_waiter = rt_mutex_top_waiter(lock);
+
+		/* [13] Drop locks */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&lock->wait_lock);
+
+		/* If owner is not blocked, end of chain. */
+		if (!next_lock)
+			goto out_put_task;
+		goto again;
+	}
+
+	/*
+	 * Store the current top waiter before doing the requeue
+	 * operation on @lock. We need it for the boost/deboost
+	 * decision below.
+	 */
+	prerequeue_top_waiter = rt_mutex_top_waiter(lock);
+
+	/* [7] Requeue the waiter in the lock waiter list. */
 	rt_mutex_dequeue(lock, waiter);
 	waiter->prio = task->prio;
 	rt_mutex_enqueue(lock, waiter);
 
-	/* Release the task */
+	/* [8] Release the task */
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	if (!rt_mutex_owner(lock)) {
-		/*
-		 * If the requeue above changed the top waiter, then we need
-		 * to wake the new top waiter up to try to get the lock.
-		 */
-
-		if (top_waiter != rt_mutex_top_waiter(lock))
-			wake_up_process(rt_mutex_top_waiter(lock)->task);
-		raw_spin_unlock(&lock->wait_lock);
-		goto out_put_task;
-	}
 	put_task_struct(task);
 
-	/* Grab the next task */
+	/*
+	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
+	 *
+	 * We must abort the chain walk if there is no lock owner even
+	 * in the deadlock detection case, as we have nothing to
+	 * follow here. This is the end of the chain we are walking.
+	 */
+	if (!rt_mutex_owner(lock)) {
+		/*
+		 * If the requeue [7] above changed the top waiter,
+		 * then we need to wake the new top waiter up to try
+		 * to get the lock.
+		 */
+		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+			wake_up_process(rt_mutex_top_waiter(lock)->task);
+		raw_spin_unlock(&lock->wait_lock);
+		return 0;
+	}
+
+	/* [10] Grab the next task, i.e. the owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+	/* [11] requeue the pi waiters if necessary */
 	if (waiter == rt_mutex_top_waiter(lock)) {
-		/* Boost the owner */
-		rt_mutex_dequeue_pi(task, top_waiter);
+		/*
+		 * The waiter became the new top (highest priority)
+		 * waiter on the lock. Replace the previous top waiter
+		 * in the owner tasks pi waiters list with this waiter
+		 * and adjust the priority of the owner.
+		 */
+		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
 		rt_mutex_enqueue_pi(task, waiter);
 		__rt_mutex_adjust_prio(task);
 
-	} else if (top_waiter == waiter) {
-		/* Deboost the owner */
+	} else if (prerequeue_top_waiter == waiter) {
+		/*
+		 * The waiter was the top waiter on the lock, but is
+		 * no longer the top priority waiter. Replace waiter in
+		 * the owner tasks pi waiters list with the new top
+		 * (highest priority) waiter and adjust the priority
+		 * of the owner.
+		 * The new top waiter is stored in @waiter so that
+		 * @waiter == @top_waiter evaluates to true below and
+		 * we continue to deboost the rest of the chain.
+		 */
 		rt_mutex_dequeue_pi(task, waiter);
 		waiter = rt_mutex_top_waiter(lock);
 		rt_mutex_enqueue_pi(task, waiter);
 		__rt_mutex_adjust_prio(task);
+	} else {
+		/*
+		 * Nothing changed. No need to do any priority
+		 * adjustment.
+		 */
 	}
 
 	/*
+	 * [12] check_exit_conditions_4() protected by task->pi_lock
+	 * and lock->wait_lock. The actual decisions are made after we
+	 * dropped the locks.
+	 *
 	 * Check whether the task which owns the current lock is pi
 	 * blocked itself. If yes we store a pointer to the lock for
 	 * the lock chain change detection above. After we dropped
 	 * task->pi_lock next_lock cannot be dereferenced anymore.
 	 */
 	next_lock = task_blocked_on_lock(task);
-
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
+	/*
+	 * Store the top waiter of @lock for the end of chain walk
+	 * decision below.
+	 */
 	top_waiter = rt_mutex_top_waiter(lock);
+
+	/* [13] Drop the locks */
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	raw_spin_unlock(&lock->wait_lock);
 
 	/*
+	 * Make the actual exit decisions [12], based on the stored
+	 * values.
+	 *
 	 * We reached the end of the lock chain. Stop right here. No
 	 * point to go back just to figure that out.
 	 */
 	if (!next_lock)
 		goto out_put_task;
 
+	/*
+	 * If the current waiter is not the top waiter on the lock,
+	 * then we can stop the chain walk here if we are not in full
+	 * deadlock detection mode.
+	 */
 	if (!detect_deadlock && waiter != top_waiter)
 		goto out_put_task;
 
@@ -533,76 +743,119 @@
  *
  * Must be called with lock->wait_lock held.
  *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *	    callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-		struct rt_mutex_waiter *waiter)
+				struct rt_mutex_waiter *waiter)
 {
+	unsigned long flags;
+
 	/*
-	 * We have to be careful here if the atomic speedups are
-	 * enabled, such that, when
-	 *  - no other waiter is on the lock
-	 *  - the lock has been released since we did the cmpxchg
-	 * the lock can be released or taken while we are doing the
-	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+	 * Before testing whether we can acquire @lock, we set the
+	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+	 * other tasks which try to modify @lock into the slow path
+	 * and they serialize on @lock->wait_lock.
 	 *
-	 * The atomic acquire/release aware variant of
-	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-	 * the WAITERS bit, the atomic release / acquire can not
-	 * happen anymore and lock->wait_lock protects us from the
-	 * non-atomic case.
+	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+	 * as explained at the top of this file if and only if:
 	 *
-	 * Note, that this might set lock->owner =
-	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-	 * any more. This is fixed up when we take the ownership.
-	 * This is the transitional state explained at the top of this file.
+	 * - There is a lock owner. The caller must fixup the
+	 *   transient state if it does a trylock or leaves the lock
+	 *   function due to a signal or timeout.
+	 *
+	 * - @task acquires the lock and there are no other
+	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
+	 *   the end of this function.
 	 */
 	mark_rt_mutex_waiters(lock);
 
+	/*
+	 * If @lock has an owner, give up.
+	 */
 	if (rt_mutex_owner(lock))
 		return 0;
 
 	/*
-	 * It will get the lock because of one of these conditions:
-	 * 1) there is no waiter
-	 * 2) higher priority than waiters
-	 * 3) it is top waiter
+	 * If @waiter != NULL, @task has already enqueued the waiter
+	 * into @lock waiter list. If @waiter == NULL then this is a
+	 * trylock attempt.
 	 */
-	if (rt_mutex_has_waiters(lock)) {
-		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
-			if (!waiter || waiter != rt_mutex_top_waiter(lock))
-				return 0;
-		}
-	}
-
-	if (waiter || rt_mutex_has_waiters(lock)) {
-		unsigned long flags;
-		struct rt_mutex_waiter *top;
-
-		raw_spin_lock_irqsave(&task->pi_lock, flags);
-
-		/* remove the queued waiter. */
-		if (waiter) {
-			rt_mutex_dequeue(lock, waiter);
-			task->pi_blocked_on = NULL;
-		}
+	if (waiter) {
+		/*
+		 * If waiter is not the highest priority waiter of
+		 * @lock, give up.
+		 */
+		if (waiter != rt_mutex_top_waiter(lock))
+			return 0;
 
 		/*
-		 * We have to enqueue the top waiter(if it exists) into
-		 * task->pi_waiters list.
+		 * We can acquire the lock. Remove the waiter from the
+		 * lock waiters list.
+		 */
+		rt_mutex_dequeue(lock, waiter);
+
+	} else {
+		/*
+		 * If the lock has waiters already we check whether @task is
+		 * eligible to take over the lock.
+		 *
+		 * If there are no other waiters, @task can acquire
+		 * the lock.  @task->pi_blocked_on is NULL, so it does
+		 * not need to be dequeued.
 		 */
 		if (rt_mutex_has_waiters(lock)) {
-			top = rt_mutex_top_waiter(lock);
-			rt_mutex_enqueue_pi(task, top);
+			/*
+			 * If @task->prio is greater than or equal to
+			 * the top waiter priority (kernel view),
+			 * @task lost.
+			 */
+			if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+				return 0;
+
+			/*
+			 * The current top waiter stays enqueued. We
+			 * don't have to change anything in the lock
+			 * waiters order.
+			 */
+		} else {
+			/*
+			 * No waiters. Take the lock without the
+		 * pi_lock dance. @task->pi_blocked_on is NULL
+			 * and we have no waiters to enqueue in @task
+			 * pi waiters list.
+			 */
+			goto takeit;
 		}
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	}
 
+	/*
+	 * Clear @task->pi_blocked_on. Requires protection by
+	 * @task->pi_lock. Redundant operation for the @waiter == NULL
+	 * case, but conditionals are more expensive than a redundant
+	 * store.
+	 */
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	task->pi_blocked_on = NULL;
+	/*
+	 * Finish the lock acquisition. @task is the new owner. If
+	 * other waiters exist we have to insert the highest priority
+	 * waiter into @task->pi_waiters list.
+	 */
+	if (rt_mutex_has_waiters(lock))
+		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+takeit:
 	/* We got the lock. */
 	debug_rt_mutex_lock(lock);
 
+	/*
+	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
+	 * are still waiters or clears it.
+	 */
 	rt_mutex_set_owner(lock, task);
 
 	rt_mutex_deadlock_account_lock(lock, task);
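
Condensed, the new acquisition logic is: a queued waiter may take the lock
only as the top waiter, and a bare trylock loses to any queued waiter of
higher or equal priority (kernel view: lower ->prio wins). As a
hypothetical summary helper, not part of the patch:

	static bool can_take(struct rt_mutex *lock, struct task_struct *task,
			     struct rt_mutex_waiter *waiter)
	{
		if (rt_mutex_owner(lock))
			return false;			/* already owned */
		if (waiter)				/* we are enqueued */
			return waiter == rt_mutex_top_waiter(lock);
		if (rt_mutex_has_waiters(lock))		/* bare trylock */
			return task->prio < rt_mutex_top_waiter(lock)->prio;
		return true;				/* free, no waiters */
	}
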
@@ -620,7 +873,7 @@
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
 				   struct task_struct *task,
-				   int detect_deadlock)
+				   enum rtmutex_chainwalk chwalk)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
@@ -666,7 +919,7 @@
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
-	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
 	}
 
@@ -691,7 +944,7 @@
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
 
 	raw_spin_lock(&lock->wait_lock);
@@ -753,9 +1006,9 @@
 static void remove_waiter(struct rt_mutex *lock,
 			  struct rt_mutex_waiter *waiter)
 {
-	int first = (waiter == rt_mutex_top_waiter(lock));
+	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
-	struct rt_mutex *next_lock = NULL;
+	struct rt_mutex *next_lock;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&current->pi_lock, flags);
@@ -763,29 +1016,31 @@
 	current->pi_blocked_on = NULL;
 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-	if (!owner)
+	/*
+	 * Only update priority if the waiter was the highest priority
+	 * waiter of the lock and there is an owner to update.
+	 */
+	if (!owner || !is_top_waiter)
 		return;
 
-	if (first) {
+	raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
-		raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	rt_mutex_dequeue_pi(owner, waiter);
 
-		rt_mutex_dequeue_pi(owner, waiter);
+	if (rt_mutex_has_waiters(lock))
+		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
 
-		if (rt_mutex_has_waiters(lock)) {
-			struct rt_mutex_waiter *next;
+	__rt_mutex_adjust_prio(owner);
 
-			next = rt_mutex_top_waiter(lock);
-			rt_mutex_enqueue_pi(owner, next);
-		}
-		__rt_mutex_adjust_prio(owner);
+	/* Store the lock on which owner is blocked or NULL */
+	next_lock = task_blocked_on_lock(owner);
 
-		/* Store the lock on which owner is blocked or NULL */
-		next_lock = task_blocked_on_lock(owner);
+	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 
-		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-	}
-
+	/*
+	 * Don't walk the chain if the owner task is not blocked
+	 * itself.
+	 */
 	if (!next_lock)
 		return;
 
@@ -794,7 +1049,8 @@
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+				   next_lock, NULL, current);
 
 	raw_spin_lock(&lock->wait_lock);
 }
@@ -824,7 +1080,8 @@
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+				   next_lock, NULL, task);
 }
 
 /**
@@ -902,7 +1159,7 @@
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock)
+		  enum rtmutex_chainwalk chwalk)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -928,7 +1185,7 @@
 			timeout->task = NULL;
 	}
 
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
 	if (likely(!ret))
 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
@@ -937,7 +1194,7 @@
 
 	if (unlikely(ret)) {
 		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
 	}
 
 	/*
@@ -960,22 +1217,31 @@
 /*
  * Slow path try-lock function:
  */
-static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
-	int ret = 0;
+	int ret;
 
+	/*
+	 * If the lock already has an owner we fail to get the lock.
+	 * This can be done without taking the @lock->wait_lock as
+	 * it is only being read, and this is a trylock anyway.
+	 */
+	if (rt_mutex_owner(lock))
+		return 0;
+
+	/*
+	 * The mutex has currently no owner. Lock the wait lock and
+	 * try to acquire the lock.
+	 */
 	raw_spin_lock(&lock->wait_lock);
 
-	if (likely(rt_mutex_owner(lock) != current)) {
+	ret = try_to_take_rt_mutex(lock, current, NULL);
 
-		ret = try_to_take_rt_mutex(lock, current, NULL);
-		/*
-		 * try_to_take_rt_mutex() sets the lock waiters
-		 * bit unconditionally. Clean this up.
-		 */
-		fixup_rt_mutex_waiters(lock);
-	}
+	/*
+	 * try_to_take_rt_mutex() sets the lock waiters bit
+	 * unconditionally. Clean this up.
+	 */
+	fixup_rt_mutex_waiters(lock);
 
 	raw_spin_unlock(&lock->wait_lock);
 
@@ -1053,30 +1319,31 @@
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
-		  int detect_deadlock,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock))
+				enum rtmutex_chainwalk chwalk))
 {
-	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock);
+		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-			struct hrtimer_sleeper *timeout, int detect_deadlock,
+			struct hrtimer_sleeper *timeout,
+			enum rtmutex_chainwalk chwalk,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock))
+				      enum rtmutex_chainwalk chwalk))
 {
-	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock);
+		return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
@@ -1109,54 +1376,61 @@
 {
 	might_sleep();
 
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
  *
- * @lock: 		the rt_mutex to be locked
- * @detect_deadlock:	deadlock detection on/off
+ * @lock:		the rt_mutex to be locked
  *
  * Returns:
- *  0 		on success
- * -EINTR 	when interrupted by a signal
- * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
+ *  0		on success
+ * -EINTR	when interrupted by a signal
  */
-int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
-						 int detect_deadlock)
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
-				 detect_deadlock, rt_mutex_slowlock);
+	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
+/*
+ * Futex variant with full deadlock detection.
+ */
+int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+			      struct hrtimer_sleeper *timeout)
+{
+	might_sleep();
+
+	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+				       RT_MUTEX_FULL_CHAINWALK,
+				       rt_mutex_slowlock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *			the timeout structure is provided
  *			by the caller
  *
- * @lock: 		the rt_mutex to be locked
+ * @lock:		the rt_mutex to be locked
  * @timeout:		timeout structure or NULL (no timeout)
- * @detect_deadlock:	deadlock detection on/off
  *
  * Returns:
- *  0 		on success
- * -EINTR 	when interrupted by a signal
+ *  0		on success
+ * -EINTR	when interrupted by a signal
  * -ETIMEDOUT	when the timeout expired
- * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
  */
 int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
-		    int detect_deadlock)
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
 	might_sleep();
 
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				       detect_deadlock, rt_mutex_slowlock);
+				       RT_MUTEX_MIN_CHAINWALK,
+				       rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
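
Callers simply drop the detect_deadlock argument, e.g.:

	ret = rt_mutex_timed_lock(lock, timeout);	/* was (lock, timeout, 0) */
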
 
@@ -1262,7 +1536,6 @@
  * @lock:		the rt_mutex to take
  * @waiter:		the pre-initialized rt_mutex_waiter
  * @task:		the task to prepare
- * @detect_deadlock:	perform deadlock detection (1) or not (0)
  *
  * Returns:
  *  0 - task blocked on lock
@@ -1273,7 +1546,7 @@
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 			      struct rt_mutex_waiter *waiter,
-			      struct task_struct *task, int detect_deadlock)
+			      struct task_struct *task)
 {
 	int ret;
 
@@ -1285,7 +1558,8 @@
 	}
 
 	/* We enforce deadlock detection for futexes */
-	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+	ret = task_blocks_on_rt_mutex(lock, waiter, task,
+				      RT_MUTEX_FULL_CHAINWALK);
 
 	if (ret && !rt_mutex_owner(lock)) {
 		/*
@@ -1331,22 +1605,20 @@
  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
  * @lock:		the rt_mutex we were woken on
  * @to:			the timeout, null if none. hrtimer should already have
- * 			been started.
+ *			been started.
  * @waiter:		the pre-initialized rt_mutex_waiter
- * @detect_deadlock:	perform deadlock detection (1) or not (0)
  *
 * Complete the lock acquisition started on our behalf by another thread.
  *
  * Returns:
  *  0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
+ * <0 - error, one of -EINTR, -ETIMEDOUT
  *
  * Special API call for PI-futex requeue support
  */
 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 			       struct hrtimer_sleeper *to,
-			       struct rt_mutex_waiter *waiter,
-			       int detect_deadlock)
+			       struct rt_mutex_waiter *waiter)
 {
 	int ret;
 
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index f6a1f3c..c406058 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -22,10 +22,15 @@
 #define debug_rt_mutex_init(m, n)			do { } while (0)
 #define debug_rt_mutex_deadlock(d, a ,l)		do { } while (0)
 #define debug_rt_mutex_print_deadlock(w)		do { } while (0)
-#define debug_rt_mutex_detect_deadlock(w,d)		(d)
 #define debug_rt_mutex_reset_waiter(w)			do { } while (0)
 
 static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
 {
 	WARN(1, "rtmutex deadlock detected\n");
 }
+
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
+						  enum rtmutex_chainwalk walk)
+{
+	return walk == RT_MUTEX_FULL_CHAINWALK;
+}
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 7431a9c..8552125 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -102,6 +102,21 @@
 }
 
 /*
+ * Constants for rt mutex functions which have a selectable deadlock
+ * detection.
+ *
+ * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
+ *				no further PI adjustments to be made.
+ *
+ * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
+ *				walk of the lock chain.
+ */
+enum rtmutex_chainwalk {
+	RT_MUTEX_MIN_CHAINWALK,
+	RT_MUTEX_FULL_CHAINWALK,
+};
+
+/*
  * PI-futex support (proxy locking functions, etc.):
  */
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
@@ -111,12 +126,11 @@
 				  struct task_struct *proxy_owner);
 extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 				     struct rt_mutex_waiter *waiter,
-				     struct task_struct *task,
-				     int detect_deadlock);
+				     struct task_struct *task);
 extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 				      struct hrtimer_sleeper *to,
-				      struct rt_mutex_waiter *waiter,
-				      int detect_deadlock);
+				      struct rt_mutex_waiter *waiter);
+extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a2391ac..d6203fa 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -329,7 +329,7 @@
 		if (need_resched())
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	rcu_read_unlock();
 
@@ -381,7 +381,7 @@
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	osq_unlock(&sem->osq);
 done:
diff --git a/kernel/module.c b/kernel/module.c
index 81e727c..ae79ce6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -60,7 +60,6 @@
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
-#include <linux/fips.h>
 #include <uapi/linux/module.h>
 #include "module-internal.h"
 
@@ -2448,9 +2447,6 @@
 	}
 
 	/* Not having a signature is only an error if we're strict. */
-	if (err < 0 && fips_enabled)
-		panic("Module verification failed with error %d in FIPS mode\n",
-		      err);
 	if (err == -ENOKEY && !sig_enforce)
 		err = 0;
 
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index fcc2611..a9dfa79 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -371,7 +371,6 @@
 	}
 
 	suspend_console();
-	ftrace_stop();
 	pm_restrict_gfp_mask();
 
 	error = dpm_suspend(PMSG_FREEZE);
@@ -397,7 +396,6 @@
 	if (error || !in_suspend)
 		pm_restore_gfp_mask();
 
-	ftrace_start();
 	resume_console();
 	dpm_complete(msg);
 
@@ -500,7 +498,6 @@
 
 	pm_prepare_console();
 	suspend_console();
-	ftrace_stop();
 	pm_restrict_gfp_mask();
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
@@ -508,7 +505,6 @@
 		dpm_resume_end(PMSG_RECOVER);
 	}
 	pm_restore_gfp_mask();
-	ftrace_start();
 	resume_console();
 	pm_restore_console();
 	return error;
@@ -535,7 +531,6 @@
 
 	entering_platform_hibernation = true;
 	suspend_console();
-	ftrace_stop();
 	error = dpm_suspend_start(PMSG_HIBERNATE);
 	if (error) {
 		if (hibernation_ops->recover)
@@ -579,7 +574,6 @@
  Resume_devices:
 	entering_platform_hibernation = false;
 	dpm_resume_end(PMSG_RESTORE);
-	ftrace_start();
 	resume_console();
 
  Close:
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index ed35a47..4b736b4 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -248,7 +248,6 @@
 		goto Platform_wake;
 	}
 
-	ftrace_stop();
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
 		goto Enable_cpus;
@@ -275,7 +274,6 @@
 
  Enable_cpus:
 	enable_nonboot_cpus();
-	ftrace_start();
 
  Platform_wake:
 	if (need_suspend_ops(state) && suspend_ops->wake)
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index bfda272..ff1a6de 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -99,6 +99,10 @@
 
 void kfree(const void *);
 
+/*
+ * Reclaim the specified callback, either by invoking it (non-lazy case)
+ * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
+ */
 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 {
 	unsigned long offset = (unsigned long)head->func;
@@ -108,12 +112,12 @@
 		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
 		kfree((void *)head - offset);
 		rcu_lock_release(&rcu_callback_map);
-		return 1;
+		return true;
 	} else {
 		RCU_TRACE(trace_rcu_invoke_callback(rn, head));
 		head->func(head);
 		rcu_lock_release(&rcu_callback_map);
-		return 0;
+		return false;
 	}
 }
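The lazy/non-lazy split above relies on an encoding trick: kfree_rcu()
stores the offset of the rcu_head within the enclosing structure in
->func, and any "function pointer" smaller than a page is treated as such
an offset.  A minimal sketch, assuming <linux/rcupdate.h> (struct and
function names hypothetical):

struct foo {
	int data;
	struct rcu_head rh;	/* offsetof(struct foo, rh) < 4096 */
};

static void foo_release(struct foo *p)
{
	/*
	 * Stores offsetof(struct foo, rh) in p->rh.func; __rcu_reclaim()
	 * later sees offset < 4096 and does kfree((void *)&p->rh - offset),
	 * i.e. kfree(p), without ever calling through the "pointer".
	 */
	kfree_rcu(p, rh);
}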
 
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f8..948a769 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
  * Copyright (C) IBM Corporation, 2005, 2006
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
- *	  Josh Triplett <josh@freedesktop.org>
+ *	  Josh Triplett <josh@joshtriplett.org>
  *
  * See also:  Documentation/RCU/torture.txt
  */
@@ -51,7 +51,7 @@
 #include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
 torture_param(int, fqs_duration, 0,
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index c639556..e037f3e 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -298,9 +298,9 @@
 
 	idx = ACCESS_ONCE(sp->completed) & 0x1;
 	preempt_disable();
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+	__this_cpu_inc(sp->per_cpu_ref->seq[idx]);
 	preempt_enable();
 	return idx;
 }
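/*
 * Annotation (not part of the patch): __this_cpu_inc() requires that
 * preemption already be disabled, which the surrounding
 * preempt_disable()/preempt_enable() pair guarantees here.  In return,
 * architectures such as x86 can emit a single per-CPU increment
 * instruction instead of the explicit address computation implied by
 * the old ACCESS_ONCE(this_cpu_ptr(...)->c[idx]) += 1 form.
 */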
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 625d0b0..1b70cb6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1013,10 +1013,7 @@
 }
 
 /*
- * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
- * for architectures that do not implement trigger_all_cpu_backtrace().
- * The NMI-triggered stack traces are more accurate because they are
- * printed by the target CPU.
+ * Dump stacks of all tasks running on stalled CPUs.
  */
 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 {
@@ -1094,7 +1091,7 @@
 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
 	if (ndetected == 0)
 		pr_err("INFO: Stall ended before state dump start\n");
-	else if (!trigger_all_cpu_backtrace())
+	else
 		rcu_dump_cpu_stacks(rsp);
 
 	/* Complain about tasks blocking the grace period. */
@@ -1125,8 +1122,7 @@
 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
 		jiffies - rsp->gp_start,
 		(long)rsp->gpnum, (long)rsp->completed, totqlen);
-	if (!trigger_all_cpu_backtrace())
-		dump_stack();
+	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
@@ -1305,10 +1301,16 @@
 	 * believe that a grace period is in progress, then we must wait
 	 * for the one following, which is in "c".  Because our request
 	 * will be noticed at the end of the current grace period, we don't
-	 * need to explicitly start one.
+	 * need to explicitly start one.  We only do the lockless check
+	 * of rnp_root's fields if the current rcu_node structure thinks
+	 * there is no grace period in flight, and because we hold rnp->lock,
+	 * the only possible change is when rnp_root's two fields are
+	 * equal, in which case rnp_root->gpnum might be concurrently
+	 * incremented.  But that is OK, as it will just result in our
+	 * doing some extra useless work.
 	 */
 	if (rnp->gpnum != rnp->completed ||
-	    ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
+	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
 		rnp->need_future_gp[c & 0x1]++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
@@ -1645,11 +1647,6 @@
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
-#ifdef CONFIG_PROVE_RCU_DELAY
-		if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
-		    system_state == SYSTEM_RUNNING)
-			udelay(200);
-#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
 		cond_resched();
 	}
 
@@ -2347,7 +2344,7 @@
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) -= count;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -2485,14 +2482,14 @@
 	struct rcu_node *rnp_old = NULL;
 
 	/* Funnel through hierarchy to reduce memory contention. */
-	rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
+	rnp = __this_cpu_read(rsp->rda->mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {
 		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
 		      !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret) {
-			ACCESS_ONCE(rsp->n_force_qs_lh)++;
+			rsp->n_force_qs_lh++;
 			return;
 		}
 		rnp_old = rnp;
@@ -2504,7 +2501,7 @@
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		ACCESS_ONCE(rsp->n_force_qs_lh)++;
+		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return;  /* Someone beat us to it. */
 	}
@@ -2662,7 +2659,7 @@
 	unsigned long flags;
 	struct rcu_data *rdp;
 
-	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
+	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
 	if (debug_rcu_head_queue(head)) {
 		/* Probable double call_rcu(), so leak the callback. */
 		ACCESS_ONCE(head->func) = rcu_leak_callback;
@@ -2693,7 +2690,7 @@
 		local_irq_restore(flags);
 		return;
 	}
-	ACCESS_ONCE(rdp->qlen)++;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -3257,7 +3254,7 @@
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3307,7 +3304,7 @@
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
@@ -3564,14 +3561,16 @@
 static void __init rcu_init_one(struct rcu_state *rsp,
 		struct rcu_data __percpu *rda)
 {
-	static char *buf[] = { "rcu_node_0",
-			       "rcu_node_1",
-			       "rcu_node_2",
-			       "rcu_node_3" };  /* Match MAX_RCU_LVLS */
-	static char *fqs[] = { "rcu_node_fqs_0",
-			       "rcu_node_fqs_1",
-			       "rcu_node_fqs_2",
-			       "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+	static const char * const buf[] = {
+		"rcu_node_0",
+		"rcu_node_1",
+		"rcu_node_2",
+		"rcu_node_3" };  /* Match MAX_RCU_LVLS */
+	static const char * const fqs[] = {
+		"rcu_node_fqs_0",
+		"rcu_node_fqs_1",
+		"rcu_node_fqs_2",
+		"rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
 	static u8 fl_mask = 0x1;
 	int cpustride = 1;
 	int i;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 0f69a79..71e64c7 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -172,6 +172,14 @@
 				/*  queued on this rcu_node structure that */
 				/*  are blocking the current grace period, */
 				/*  there can be no such task. */
+	struct completion boost_completion;
+				/* Used to ensure that the rt_mutex used */
+				/*  to carry out the boosting is fully */
+				/*  released with no future boostee accesses */
+				/*  before that rt_mutex is re-initialized. */
+	struct rt_mutex boost_mtx;
+				/* Used only for the priority-boosting */
+				/*  side effect, not as a lock. */
 	unsigned long boost_time;
 				/* When to start boosting (jiffies). */
 	struct task_struct *boost_kthread_task;
@@ -334,11 +342,29 @@
 	struct rcu_head **nocb_tail;
 	atomic_long_t nocb_q_count;	/* # CBs waiting for kthread */
 	atomic_long_t nocb_q_count_lazy; /*  (approximate). */
+	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
+	struct rcu_head **nocb_follower_tail;
+	atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
+	atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
 	int nocb_p_count;		/* # CBs being invoked by kthread */
 	int nocb_p_count_lazy;		/*  (approximate). */
 	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
 	struct task_struct *nocb_kthread;
 	bool nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
+
+	/* The following fields are used by the leader, hence own cacheline. */
+	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
+					/* CBs waiting for GP. */
+	struct rcu_head **nocb_gp_tail;
+	long nocb_gp_count;
+	long nocb_gp_count_lazy;
+	bool nocb_leader_wake;		/* Is the nocb leader thread awake? */
+	struct rcu_data *nocb_next_follower;
+					/* Next follower in wakeup chain. */
+
+	/* The following fields are used by the follower, hence new cacheline. */
+	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
+					/* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
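/*
 * Illustrative topology for the new fields (annotation; the stride of
 * 4 is an example):
 *
 *	rdp0 (leader) -> rdp1 -> rdp2 -> rdp3	  (->nocb_next_follower)
 *	  ^--- each follower's ->nocb_leader points back at rdp0
 *
 * The grace-period kthread wakes only rdp0, which then fans callbacks
 * and wakeups out to its followers, so GP-kthread wakeups scale with
 * the number of groups rather than with the number of CPUs.
 */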
 
 	/* 8) RCU CPU stall data. */
@@ -587,8 +613,14 @@
 /* Sum up queue lengths for tracing. */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 {
-	*ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
-	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
+	*ql = atomic_long_read(&rdp->nocb_q_count) +
+	      rdp->nocb_p_count +
+	      atomic_long_read(&rdp->nocb_follower_count) +
+	      rdp->nocb_gp_count;
+	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
+	       rdp->nocb_p_count_lazy +
+	       atomic_long_read(&rdp->nocb_follower_count_lazy) +
+	       rdp->nocb_gp_count_lazy;
 }
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 02ac0fb..00dc411 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -33,6 +33,7 @@
 #define RCU_KTHREAD_PRIO 1
 
 #ifdef CONFIG_RCU_BOOST
+#include "../locking/rtmutex_common.h"
 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
 #else
 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
@@ -336,7 +337,7 @@
 	unsigned long flags;
 	struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rbmp = NULL;
+	bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
 	int special;
@@ -398,11 +399,8 @@
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
-		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
-		if (t->rcu_boost_mutex) {
-			rbmp = t->rcu_boost_mutex;
-			t->rcu_boost_mutex = NULL;
-		}
+		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
+		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ -427,8 +425,10 @@
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
-		if (rbmp)
-			rt_mutex_unlock(rbmp);
+		if (drop_boost_mutex) {
+			rt_mutex_unlock(&rnp->boost_mtx);
+			complete(&rnp->boost_completion);
+		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ -988,6 +988,7 @@
 
 /* Because preemptible RCU does not exist, no quieting of tasks. */
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1149,7 +1150,6 @@
 static int rcu_boost(struct rcu_node *rnp)
 {
 	unsigned long flags;
-	struct rt_mutex mtx;
 	struct task_struct *t;
 	struct list_head *tb;
 
@@ -1200,11 +1200,15 @@
 	 * section.
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
-	rt_mutex_init_proxy_locked(&mtx, t);
-	t->rcu_boost_mutex = &mtx;
+	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
+	init_completion(&rnp->boost_completion);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
-	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
+	/* Lock only for side effect: boosts task t's priority. */
+	rt_mutex_lock(&rnp->boost_mtx);
+	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
+
+	/* Wait for boostee to be done with boost_mtx before reinitializing. */
+	wait_for_completion(&rnp->boost_completion);
 
 	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
 	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
@@ -1256,6 +1260,7 @@
  * about it going away.
  */
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
 	struct task_struct *t;
 
@@ -1491,6 +1496,7 @@
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -2060,6 +2066,22 @@
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
 /*
+ * Kick the leader kthread for this NOCB group.
+ */
+static void wake_nocb_leader(struct rcu_data *rdp, bool force)
+{
+	struct rcu_data *rdp_leader = rdp->nocb_leader;
+
+	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
+		return;
+	if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
+		/* Prior xchg orders against prior callback enqueue. */
+		ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
+		wake_up(&rdp_leader->nocb_wq);
+	}
+}
+
+/*
  * Enqueue the specified string of rcu_head structures onto the specified
  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
  * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
@@ -2093,7 +2115,8 @@
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
 		if (!irqs_disabled_flags(flags)) {
-			wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
+			/* ... if queue was empty ... */
+			wake_nocb_leader(rdp, false);
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("WakeEmpty"));
 		} else {
@@ -2103,7 +2126,8 @@
 		}
 		rdp->qlen_last_fqs_check = 0;
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
-		wake_up_process(t); /* ... or if many callbacks queued. */
+		/* ... or if many callbacks queued. */
+		wake_nocb_leader(rdp, true);
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
 	} else {
@@ -2213,13 +2237,150 @@
 }
 
 /*
+ * Leaders come here to wait for additional callbacks to show up.
+ * This function does not return until callbacks appear.
+ */
+static void nocb_leader_wait(struct rcu_data *my_rdp)
+{
+	bool firsttime = true;
+	bool gotcbs;
+	struct rcu_data *rdp;
+	struct rcu_head **tail;
+
+wait_again:
+
+	/* Wait for callbacks to appear. */
+	if (!rcu_nocb_poll) {
+		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
+		wait_event_interruptible(my_rdp->nocb_wq,
+					 ACCESS_ONCE(my_rdp->nocb_leader_wake));
+		/* Memory barrier handled by smp_mb() calls below and repoll. */
+	} else if (firsttime) {
+		firsttime = false; /* Don't drown trace log with "Poll"! */
+		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
+	}
+
+	/*
+	 * Each pass through the following loop checks a follower for CBs.
+	 * We are our own first follower.  Any CBs found are moved to
+	 * nocb_gp_head, where they await a grace period.
+	 */
+	gotcbs = false;
+	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+		rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
+		if (!rdp->nocb_gp_head)
+			continue;  /* No CBs here, try next follower. */
+
+		/* Move callbacks to wait-for-GP list, which is empty. */
+		ACCESS_ONCE(rdp->nocb_head) = NULL;
+		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
+		rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
+		rdp->nocb_gp_count_lazy =
+			atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
+		gotcbs = true;
+	}
+
+	/*
+	 * If there were no callbacks, sleep a bit, rescan after a
+	 * memory barrier, and go retry.
+	 */
+	if (unlikely(!gotcbs)) {
+		if (!rcu_nocb_poll)
+			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
+					    TPS("WokeEmpty"));
+		flush_signals(current);
+		schedule_timeout_interruptible(1);
+
+		/* Rescan in case we were a victim of memory ordering. */
+		my_rdp->nocb_leader_wake = false;
+		smp_mb();  /* Ensure _wake false before scan. */
+		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
+			if (ACCESS_ONCE(rdp->nocb_head)) {
+				/* Found CB, so short-circuit next wait. */
+				my_rdp->nocb_leader_wake = true;
+				break;
+			}
+		goto wait_again;
+	}
+
+	/* Wait for one grace period. */
+	rcu_nocb_wait_gp(my_rdp);
+
+	/*
+	 * We left ->nocb_leader_wake set to reduce cache thrashing.
+	 * We clear it now, but recheck for new callbacks while
+	 * traversing our follower list.
+	 */
+	my_rdp->nocb_leader_wake = false;
+	smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
+
+	/* Each pass through the following loop wakes a follower, if needed. */
+	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+		if (ACCESS_ONCE(rdp->nocb_head))
+			my_rdp->nocb_leader_wake = true; /* No need to wait. */
+		if (!rdp->nocb_gp_head)
+			continue; /* No CBs, so no need to wake follower. */
+
+		/* Append callbacks to follower's "done" list. */
+		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
+		*tail = rdp->nocb_gp_head;
+		atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
+		atomic_long_add(rdp->nocb_gp_count_lazy,
+				&rdp->nocb_follower_count_lazy);
+		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
+			/*
+			 * List was empty, wake up the follower.
+			 * Memory barriers supplied by atomic_long_add().
+			 */
+			wake_up(&rdp->nocb_wq);
+		}
+	}
+
+	/* If we (the leader) don't have CBs, go wait some more. */
+	if (!my_rdp->nocb_follower_head)
+		goto wait_again;
+}
+
+/*
+ * Followers come here to wait for additional callbacks to show up.
+ * This function does not return until callbacks appear.
+ */
+static void nocb_follower_wait(struct rcu_data *rdp)
+{
+	bool firsttime = true;
+
+	for (;;) {
+		if (!rcu_nocb_poll) {
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("FollowerSleep"));
+			wait_event_interruptible(rdp->nocb_wq,
+						 ACCESS_ONCE(rdp->nocb_follower_head));
+		} else if (firsttime) {
+			/* Don't drown trace log with "Poll"! */
+			firsttime = false;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("Poll"));
+		}
+		if (smp_load_acquire(&rdp->nocb_follower_head)) {
+			/* ^^^ Ensure CB invocation follows _head test. */
+			return;
+		}
+		if (!rcu_nocb_poll)
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WokeEmpty"));
+		flush_signals(current);
+		schedule_timeout_interruptible(1);
+	}
+}
+
+/*
  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
- * callbacks queued by the corresponding no-CBs CPU.
+ * callbacks queued by the corresponding no-CBs CPU; however, there is
+ * an optional leader-follower relationship so that the grace-period
+ * kthreads don't have to do quite so many wakeups.
  */
 static int rcu_nocb_kthread(void *arg)
 {
 	int c, cl;
-	bool firsttime = 1;
 	struct rcu_head *list;
 	struct rcu_head *next;
 	struct rcu_head **tail;
@@ -2227,41 +2388,22 @@
 
 	/* Each pass through this loop invokes one batch of callbacks */
 	for (;;) {
-		/* If not polling, wait for next batch of callbacks. */
-		if (!rcu_nocb_poll) {
-			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-					    TPS("Sleep"));
-			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
-			/* Memory barrier provide by xchg() below. */
-		} else if (firsttime) {
-			firsttime = 0;
-			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-					    TPS("Poll"));
-		}
-		list = ACCESS_ONCE(rdp->nocb_head);
-		if (!list) {
-			if (!rcu_nocb_poll)
-				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-						    TPS("WokeEmpty"));
-			schedule_timeout_interruptible(1);
-			flush_signals(current);
-			continue;
-		}
-		firsttime = 1;
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-				    TPS("WokeNonEmpty"));
+		/* Wait for callbacks. */
+		if (rdp->nocb_leader == rdp)
+			nocb_leader_wait(rdp);
+		else
+			nocb_follower_wait(rdp);
 
-		/*
-		 * Extract queued callbacks, update counts, and wait
-		 * for a grace period to elapse.
-		 */
-		ACCESS_ONCE(rdp->nocb_head) = NULL;
-		tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
-		c = atomic_long_xchg(&rdp->nocb_q_count, 0);
-		cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
-		ACCESS_ONCE(rdp->nocb_p_count) += c;
-		ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
-		rcu_nocb_wait_gp(rdp);
+		/* Pull the ready-to-invoke callbacks onto local list. */
+		list = ACCESS_ONCE(rdp->nocb_follower_head);
+		BUG_ON(!list);
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
+		ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
+		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
+		c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
+		cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
+		rdp->nocb_p_count += c;
+		rdp->nocb_p_count_lazy += cl;
 
 		/* Each pass through the following loop invokes a callback. */
 		trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
@@ -2305,7 +2447,7 @@
 	if (!rcu_nocb_need_deferred_wakeup(rdp))
 		return;
 	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
-	wake_up(&rdp->nocb_wq);
+	wake_nocb_leader(rdp, false);
 	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
 }
 
@@ -2314,19 +2456,57 @@
 {
 	rdp->nocb_tail = &rdp->nocb_head;
 	init_waitqueue_head(&rdp->nocb_wq);
+	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
 
-/* Create a kthread for each RCU flavor for each no-CBs CPU. */
+/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
+static int rcu_nocb_leader_stride = -1;
+module_param(rcu_nocb_leader_stride, int, 0444);
+
+/*
+ * Create a kthread for each RCU flavor for each no-CBs CPU.
+ * Also initialize leader-follower relationships.
+ */
 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 {
 	int cpu;
+	int ls = rcu_nocb_leader_stride;
+	int nl = 0;  /* Next leader. */
 	struct rcu_data *rdp;
+	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
+	struct rcu_data *rdp_prev = NULL;
 	struct task_struct *t;
 
 	if (rcu_nocb_mask == NULL)
 		return;
+#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
+	if (tick_nohz_full_running)
+		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
+	if (ls == -1) {
+		ls = int_sqrt(nr_cpu_ids);
+		rcu_nocb_leader_stride = ls;
+	}
+
+	/*
+	 * Each pass through this loop sets up one rcu_data structure and
+	 * spawns one rcu_nocb_kthread().
+	 */
 	for_each_cpu(cpu, rcu_nocb_mask) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
+		if (rdp->cpu >= nl) {
+			/* New leader, set up for followers & next leader. */
+			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
+			rdp->nocb_leader = rdp;
+			rdp_leader = rdp;
+		} else {
+			/* Another follower, link to previous leader. */
+			rdp->nocb_leader = rdp_leader;
+			rdp_prev->nocb_next_follower = rdp;
+		}
+		rdp_prev = rdp;
+
+		/* Spawn the kthread for this CPU. */
 		t = kthread_run(rcu_nocb_kthread, rdp,
 				"rcuo%c/%d", rsp->abbr, cpu);
 		BUG_ON(IS_ERR(t));
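/*
 * Worked example of the grouping above (annotation): with
 * nr_cpu_ids = 16, the default stride is ls = int_sqrt(16) = 4.
 * CPU 0 satisfies cpu >= nl (0), becomes a leader, and sets
 * nl = DIV_ROUND_UP(1, 4) * 4 = 4; CPUs 1-3 fail the test and chain in
 * as its followers; CPU 4 then satisfies 4 >= 4 and starts the next
 * group, advancing nl to 8, and so on.
 */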
@@ -2843,12 +3023,16 @@
  */
 static void rcu_bind_gp_kthread(void)
 {
-#ifdef CONFIG_NO_HZ_FULL
-	int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+	int __maybe_unused cpu;
 
-	if (cpu < 0 || cpu >= nr_cpu_ids)
+	if (!tick_nohz_full_enabled())
 		return;
-	if (raw_smp_processor_id() != cpu)
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+	cpu = tick_do_timer_cpu;
+	if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
 		set_cpus_allowed_ptr(current, cpumask_of(cpu));
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+	if (!is_housekeeping_cpu(raw_smp_processor_id()))
+		housekeeping_affine(current);
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index bc78835..4056d79 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -90,9 +90,6 @@
 	} else {
 		barrier();  /* critical section before exit code. */
 		t->rcu_read_lock_nesting = INT_MIN;
-#ifdef CONFIG_PROVE_RCU_DELAY
-		udelay(10); /* Make preemption more probable. */
-#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
 		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
 			rcu_read_unlock_special(t);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2676866..1211575 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8122,7 +8122,7 @@
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
-	.base_cftypes	= cpu_files,
+	.legacy_cftypes	= cpu_files,
 	.early_init	= 1,
 };
 
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 9cf350c..dd7cbb5 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -278,6 +278,6 @@
 struct cgroup_subsys cpuacct_cgrp_subsys = {
 	.css_alloc	= cpuacct_css_alloc,
 	.css_free	= cpuacct_css_free,
-	.base_cftypes	= files,
+	.legacy_cftypes	= files,
 	.early_init	= 1,
 };
diff --git a/kernel/signal.c b/kernel/signal.c
index a4077e9..40b76e3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1263,6 +1263,10 @@
 	struct sighand_struct *sighand;
 
 	for (;;) {
+		/*
+		 * Disable interrupts early to avoid deadlocks.
+		 * See rcu_read_unlock() comment header for details.
+		 */
 		local_irq_save(*flags);
 		rcu_read_lock();
 		sighand = rcu_dereference(tsk->sighand);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ad362c2..9c94c19 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -146,7 +146,8 @@
 {
 	/* Nothing to do if we already reached the limit */
 	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
-		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
+		printk_deferred(KERN_WARNING
+				"CE: Reprogramming failure. Giving up\n");
 		dev->next_event.tv64 = KTIME_MAX;
 		return -ETIME;
 	}
@@ -159,9 +160,10 @@
 	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
 		dev->min_delta_ns = MIN_DELTA_LIMIT;
 
-	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
-	       dev->name ? dev->name : "?",
-	       (unsigned long long) dev->min_delta_ns);
+	printk_deferred(KERN_WARNING
+			"CE: %s increased min_delta_ns to %llu nsec\n",
+			dev->name ? dev->name : "?",
+			(unsigned long long) dev->min_delta_ns);
 	return 0;
 }
 
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 445106d..01d2d15 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -191,7 +191,8 @@
 
 static int sched_clock_suspend(void)
 {
-	sched_clock_poll(&sched_clock_timer);
+	update_sched_clock();
+	hrtimer_cancel(&sched_clock_timer);
 	cd.suspended = true;
 	return 0;
 }
@@ -199,6 +200,7 @@
 static void sched_clock_resume(void)
 {
 	cd.epoch_cyc = read_sched_clock();
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
 	cd.suspended = false;
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3d63944..99aa6ee 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,6 +154,7 @@
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
 
 static bool can_stop_full_tick(void)
@@ -283,6 +284,7 @@
 	int cpu;
 
 	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+	alloc_bootmem_cpumask_var(&housekeeping_mask);
 	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
 		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
 		return 1;
@@ -293,6 +295,8 @@
 		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
 		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
 	}
+	cpumask_andnot(housekeeping_mask,
+		       cpu_possible_mask, tick_nohz_full_mask);
 	tick_nohz_full_running = true;
 
 	return 1;
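/*
 * Example of the new mask (annotation): booting an 8-CPU system with
 * nohz_full=1-7 leaves housekeeping_mask = {0}, so CPU 0 keeps the
 * periodic tick and absorbs the timekeeping and unbound work that the
 * full-dynticks CPUs 1-7 shed.
 */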
@@ -334,9 +338,15 @@
 		pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
 		return err;
 	}
+	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
+		pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n");
+		return err;
+	}
 	err = 0;
 	cpumask_setall(tick_nohz_full_mask);
 	cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
+	cpumask_clear(housekeeping_mask);
+	cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
 	tick_nohz_full_running = true;
 #endif
 	return err;
diff --git a/kernel/torture.c b/kernel/torture.c
index 40bb511..d600af21 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -708,7 +708,7 @@
 	int ret = 0;
 
 	VERBOSE_TOROUT_STRING(m);
-	*tp = kthread_run(fn, arg, s);
+	*tp = kthread_run(fn, arg, "%s", s);
 	if (IS_ERR(*tp)) {
 		ret = PTR_ERR(*tp);
 		VERBOSE_TOROUT_ERRSTRING(f);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d440935..a5da09c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -29,11 +29,6 @@
 	help
 	  See Documentation/trace/ftrace-design.txt
 
-config HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	bool
-	help
-	  See Documentation/trace/ftrace-design.txt
-
 config HAVE_DYNAMIC_FTRACE
 	bool
 	help
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 2611613..67d6369 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -28,6 +28,7 @@
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_output.o
+obj-$(CONFIG_TRACING) += trace_seq.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ac9d1da..1654b12 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -80,9 +80,6 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* Quick disabling of function tracer. */
-int function_trace_stop __read_mostly;
-
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 /* What to set function_trace_op to */
@@ -1042,6 +1039,8 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+static struct ftrace_ops *removed_ops;
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
@@ -1304,25 +1303,15 @@
 	struct ftrace_hash *new_hash;
 	int size = src->count;
 	int bits = 0;
-	int ret;
 	int i;
 
 	/*
-	 * Remove the current set, update the hash and add
-	 * them back.
-	 */
-	ftrace_hash_rec_disable(ops, enable);
-
-	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
 	 */
 	if (!src->count) {
-		free_ftrace_hash_rcu(*dst);
-		rcu_assign_pointer(*dst, EMPTY_HASH);
-		/* still need to update the function records */
-		ret = 0;
-		goto out;
+		new_hash = EMPTY_HASH;
+		goto update;
 	}
 
 	/*
@@ -1335,10 +1324,9 @@
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
-	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		goto out;
+		return -ENOMEM;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1349,20 +1337,20 @@
 		}
 	}
 
+update:
+	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
 	old_hash = *dst;
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	ret = 0;
- out:
-	/*
-	 * Enable regardless of ret:
-	 *  On success, we enable the new hash.
-	 *  On failure, we re-enable the original hash.
-	 */
 	ftrace_hash_rec_enable(ops, enable);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -1492,6 +1480,53 @@
 	return (int)!!ret;
 }
 
+/* Test if ops registered to this rec needs regs */
+static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	bool keep_regs = false;
+
+	for (ops = ftrace_ops_list;
+	     ops != &ftrace_list_end; ops = ops->next) {
+		/* pass rec in as regs to have non-NULL val */
+		if (ftrace_ops_test(ops, rec->ip, rec)) {
+			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+				keep_regs = true;
+				break;
+			}
+		}
+	}
+
+	return keep_regs;
+}
+
+static void ftrace_remove_tramp(struct ftrace_ops *ops,
+				struct dyn_ftrace *rec)
+{
+	struct ftrace_func_entry *entry;
+
+	entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
+	if (!entry)
+		return;
+
+	/*
+	 * The tramp_hash entry will be removed at time
+	 * of update.
+	 */
+	ops->nr_trampolines--;
+	rec->flags &= ~FTRACE_FL_TRAMP;
+}
+
+static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->nr_trampolines)
+			ftrace_remove_tramp(op, rec);
+	} while_for_each_ftrace_op(op);
+}
+
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 				     int filter_hash,
 				     bool inc)
@@ -1572,8 +1607,30 @@
 
 		if (inc) {
 			rec->flags++;
-			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
 				return;
+
+			/*
+			 * If there's only a single callback registered to a
+			 * function, and the ops has a trampoline registered
+			 * for it, then we can call it directly.
+			 */
+			if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+				rec->flags |= FTRACE_FL_TRAMP;
+				ops->nr_trampolines++;
+			} else {
+				/*
+				 * If we are adding another function callback
+				 * to this function, and the previous had a
+				 * trampoline used, then we need to go back to
+				 * the default trampoline.
+				 */
+				rec->flags &= ~FTRACE_FL_TRAMP;
+
+				/* remove trampolines from any ops for this rec */
+				ftrace_clear_tramps(rec);
+			}
+
 			/*
 			 * If any ops wants regs saved for this function
 			 * then all ops will get saved regs.
@@ -1581,9 +1638,30 @@
 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
 				rec->flags |= FTRACE_FL_REGS;
 		} else {
-			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
 				return;
 			rec->flags--;
+
+			if (ops->trampoline && !ftrace_rec_count(rec))
+				ftrace_remove_tramp(ops, rec);
+
+			/*
+			 * If the rec had REGS enabled and the ops that is
+			 * being removed had REGS set, then see if there is
+			 * still any ops for this record that wants regs.
+			 * If not, we can stop recording them.
+			 */
+			if (ftrace_rec_count(rec) > 0 &&
+			    rec->flags & FTRACE_FL_REGS &&
+			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+				if (!test_rec_ops_needs_regs(rec))
+					rec->flags &= ~FTRACE_FL_REGS;
+			}
+
+			/*
+			 * flags will be cleared in ftrace_check_record()
+			 * if rec count is zero.
+			 */
 		}
 		count++;
 		/* Shortcut, if we handled all records, we are done. */
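/*
 * Annotation on the accounting above (bit definitions live in
 * <linux/ftrace.h> and are not repeated here): the low bits of
 * rec->flags form a reference count of registered ops, which
 * ftrace_rec_count() extracts, while high bits such as
 * FTRACE_FL_REGS/_EN and FTRACE_FL_TRAMP/_EN describe how the callsite
 * is currently patched.  "rec->flags++" therefore takes a reference,
 * and FTRACE_FL_TRAMP is only valid while that count is exactly 1.
 */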
@@ -1668,17 +1746,23 @@
 	 * If we are disabling calls, then disable all records that
 	 * are enabled.
 	 */
-	if (enable && (rec->flags & ~FTRACE_FL_MASK))
+	if (enable && ftrace_rec_count(rec))
 		flag = FTRACE_FL_ENABLED;
 
 	/*
-	 * If enabling and the REGS flag does not match the REGS_EN, then
-	 * do not ignore this record. Set flags to fail the compare against
-	 * ENABLED.
+	 * If enabling and the REGS flag does not match the REGS_EN, or
+	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
+	 * this record. Set flags to fail the compare against ENABLED.
 	 */
-	if (flag &&
-	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
-		flag |= FTRACE_FL_REGS;
+	if (flag) {
+		if (!(rec->flags & FTRACE_FL_REGS) !=
+		    !(rec->flags & FTRACE_FL_REGS_EN))
+			flag |= FTRACE_FL_REGS;
+
+		if (!(rec->flags & FTRACE_FL_TRAMP) !=
+		    !(rec->flags & FTRACE_FL_TRAMP_EN))
+			flag |= FTRACE_FL_TRAMP;
+	}
 
 	/* If the state of this record hasn't changed, then do nothing */
 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
@@ -1696,6 +1780,12 @@
 				else
 					rec->flags &= ~FTRACE_FL_REGS_EN;
 			}
+			if (flag & FTRACE_FL_TRAMP) {
+				if (rec->flags & FTRACE_FL_TRAMP)
+					rec->flags |= FTRACE_FL_TRAMP_EN;
+				else
+					rec->flags &= ~FTRACE_FL_TRAMP_EN;
+			}
 		}
 
 		/*
@@ -1704,7 +1794,7 @@
 		 * Otherwise,
 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
 		 *   from the save regs, to a non-save regs function or
-		 *   vice versa.
+		 *   vice versa, or from a trampoline call.
 		 */
 		if (flag & FTRACE_FL_ENABLED)
 			return FTRACE_UPDATE_MAKE_CALL;
@@ -1714,7 +1804,7 @@
 
 	if (update) {
 		/* If there's no more users, clear all flags */
-		if (!(rec->flags & ~FTRACE_FL_MASK))
+		if (!ftrace_rec_count(rec))
 			rec->flags = 0;
 		else
 			/* Just disable the record (keep REGS state) */
@@ -1751,6 +1841,43 @@
 	return ftrace_check_record(rec, enable, 0);
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+
+	/* Removed ops need to be tested first */
+	if (removed_ops && removed_ops->tramp_hash) {
+		if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+			return removed_ops;
+	}
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (!op->tramp_hash)
+			continue;
+
+		if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+			return op;
+
+	} while_for_each_ftrace_op(op);
+
+	return NULL;
+}
+
+static struct ftrace_ops *
+ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/* pass rec in as regs to have non-NULL val */
+		if (ftrace_ops_test(op, rec->ip, rec))
+			return op;
+	} while_for_each_ftrace_op(op);
+
+	return NULL;
+}
+
 /**
  * ftrace_get_addr_new - Get the call address to set to
  * @rec:  The ftrace record descriptor
@@ -1763,6 +1890,20 @@
  */
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 {
+	struct ftrace_ops *ops;
+
+	/* Trampolines take precedence over regs */
+	if (rec->flags & FTRACE_FL_TRAMP) {
+		ops = ftrace_find_tramp_ops_new(rec);
+		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
+			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+				    (void *)rec->ip, (void *)rec->ip);
+			/* Ftrace is shutting down, return anything */
+			return (unsigned long)FTRACE_ADDR;
+		}
+		return ops->trampoline;
+	}
+
 	if (rec->flags & FTRACE_FL_REGS)
 		return (unsigned long)FTRACE_REGS_ADDR;
 	else
@@ -1781,6 +1922,20 @@
  */
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 {
+	struct ftrace_ops *ops;
+
+	/* Trampolines take precedence over regs */
+	if (rec->flags & FTRACE_FL_TRAMP_EN) {
+		ops = ftrace_find_tramp_ops_curr(rec);
+		if (FTRACE_WARN_ON(!ops)) {
+			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+				    (void *)rec->ip, (void *)rec->ip);
+			/* Ftrace is shutting down, return anything */
+			return (unsigned long)FTRACE_ADDR;
+		}
+		return ops->trampoline;
+	}
+
 	if (rec->flags & FTRACE_FL_REGS_EN)
 		return (unsigned long)FTRACE_REGS_ADDR;
 	else
@@ -2023,6 +2178,89 @@
 	ftrace_run_stop_machine(command);
 }
 
+static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
+{
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	int size, bits;
+	int ret;
+
+	size = ops->nr_trampolines;
+	bits = 0;
+	/*
+	 * Make the hash size about 1/2 the # found
+	 */
+	for (size /= 2; size; size >>= 1)
+		bits++;
+
+	ops->tramp_hash = alloc_ftrace_hash(bits);
+	/*
+	 * TODO: a failed allocation is going to corrupt the accounting
+	 * of which records need to be modified and which do not. For
+	 * now, we kill ftrace if we fail to allocate here. There are
+	 * ways around this, but they will take a little more work.
+	 */
+	if (!ops->tramp_hash)
+		return -ENOMEM;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		if (ftrace_rec_count(rec) == 1 &&
+		    ftrace_ops_test(ops, rec->ip, rec)) {
+
+			/*
+			 * If another ops adds to a rec, the rec will
+			 * lose its trampoline and never get it back
+			 * until all ops are off of it.
+			 */
+			if (!(rec->flags & FTRACE_FL_TRAMP))
+				continue;
+
+			/* This record had better have a trampoline */
+			if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
+				return -1;
+
+			ret = add_hash_entry(ops->tramp_hash, rec->ip);
+			if (ret < 0)
+				return ret;
+		}
+	} while_for_each_ftrace_rec();
+
+	/* The number of recs in the hash must match nr_trampolines */
+	FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+
+	return 0;
+}
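/*
 * Worked example for the sizing loop above (annotation): with
 * nr_trampolines = 24, size /= 2 yields 12, which takes four right
 * shifts to reach zero, so bits = 4 and alloc_ftrace_hash(4) creates a
 * 16-bucket hash -- roughly "1/2 the # found", at about 1.5 entries
 * per bucket.
 */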
+
+static int ftrace_save_tramp_hashes(void)
+{
+	struct ftrace_ops *op;
+	int ret;
+
+	/*
+	 * Now that any trampoline is being used, we need to save the
+	 * hashes for the ops that have them. This allows the mapping
+	 * back from the record to the ops that has the trampoline to
+	 * know what code is being replaced. Modifying code must always
+	 * verify what it is changing.
+	 */
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+		/* The tramp_hash is recreated each time. */
+		free_ftrace_hash(op->tramp_hash);
+		op->tramp_hash = NULL;
+
+		if (op->nr_trampolines) {
+			ret = ftrace_save_ops_tramp_hash(op);
+			if (ret)
+				return ret;
+		}
+
+	} while_for_each_ftrace_op(op);
+
+	return 0;
+}
+
 static void ftrace_run_update_code(int command)
 {
 	int ret;
@@ -2031,11 +2269,6 @@
 	FTRACE_WARN_ON(ret);
 	if (ret)
 		return;
-	/*
-	 * Do not call function tracer while we update the code.
-	 * We are in stop machine.
-	 */
-	function_trace_stop++;
 
 	/*
 	 * By default we use stop_machine() to modify the code.
@@ -2045,15 +2278,15 @@
 	 */
 	arch_ftrace_update_code(command);
 
-	function_trace_stop--;
-
 	ret = ftrace_arch_code_modify_post_process();
 	FTRACE_WARN_ON(ret);
+
+	ret = ftrace_save_tramp_hashes();
+	FTRACE_WARN_ON(ret);
 }
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static int global_start_up;
 
 static void control_ops_free(struct ftrace_ops *ops)
 {
@@ -2117,8 +2350,7 @@
 
 	ftrace_hash_rec_disable(ops, 1);
 
-	if (!global_start_up)
-		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
 	command |= FTRACE_UPDATE_CALLS;
 
@@ -2139,8 +2371,16 @@
 		return 0;
 	}
 
+	/*
+	 * If the ops uses a trampoline, then it needs to be
+	 * tested first on update.
+	 */
+	removed_ops = ops;
+
 	ftrace_run_update_code(command);
 
+	removed_ops = NULL;
+
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
@@ -2398,7 +2638,8 @@
 	return start_pg;
 
  free_pages:
-	while (start_pg) {
+	pg = start_pg;
+	while (pg) {
 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
 		free_pages((unsigned long)pg->records, order);
 		start_pg = pg->next;
@@ -2595,8 +2836,10 @@
 	 * off, we can short cut and just print out that all
 	 * functions are enabled.
 	 */
-	if (iter->flags & FTRACE_ITER_FILTER &&
-	    ftrace_hash_empty(ops->filter_hash)) {
+	if ((iter->flags & FTRACE_ITER_FILTER &&
+	     ftrace_hash_empty(ops->filter_hash)) ||
+	    (iter->flags & FTRACE_ITER_NOTRACE &&
+	     ftrace_hash_empty(ops->notrace_hash))) {
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
@@ -2641,7 +2884,10 @@
 		return t_hash_show(m, iter);
 
 	if (iter->flags & FTRACE_ITER_PRINTALL) {
-		seq_printf(m, "#### all functions enabled ####\n");
+		if (iter->flags & FTRACE_ITER_NOTRACE)
+			seq_printf(m, "#### no functions disabled ####\n");
+		else
+			seq_printf(m, "#### all functions enabled ####\n");
 		return 0;
 	}
 
@@ -2651,10 +2897,22 @@
 		return 0;
 
 	seq_printf(m, "%ps", (void *)rec->ip);
-	if (iter->flags & FTRACE_ITER_ENABLED)
+	if (iter->flags & FTRACE_ITER_ENABLED) {
 		seq_printf(m, " (%ld)%s",
-			   rec->flags & ~FTRACE_FL_MASK,
-			   rec->flags & FTRACE_FL_REGS ? " R" : "");
+			   ftrace_rec_count(rec),
+			   rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+		if (rec->flags & FTRACE_FL_TRAMP_EN) {
+			struct ftrace_ops *ops;
+
+			ops = ftrace_find_tramp_ops_curr(rec);
+			if (ops && ops->trampoline)
+				seq_printf(m, "\ttramp: %pS",
+					   (void *)ops->trampoline);
+			else
+				seq_printf(m, "\ttramp: ERROR!");
+		}
+	}
+
 	seq_printf(m, "\n");
 
 	return 0;
@@ -2702,13 +2960,6 @@
 	return iter ? 0 : -ENOMEM;
 }
 
-static void ftrace_filter_reset(struct ftrace_hash *hash)
-{
-	mutex_lock(&ftrace_lock);
-	ftrace_hash_clear(hash);
-	mutex_unlock(&ftrace_lock);
-}
-
 /**
  * ftrace_regex_open - initialize function tracer filter files
  * @ops: The ftrace_ops that hold the hash filters
@@ -2758,7 +3009,13 @@
 		hash = ops->filter_hash;
 
 	if (file->f_mode & FMODE_WRITE) {
-		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+		if (file->f_flags & O_TRUNC)
+			iter->hash = alloc_ftrace_hash(size_bits);
+		else
+			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
 			kfree(iter);
@@ -2767,10 +3024,6 @@
 		}
 	}
 
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		ftrace_filter_reset(iter->hash);
-
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
 
@@ -3471,14 +3724,16 @@
 	else
 		orig_hash = &ops->notrace_hash;
 
-	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+	if (reset)
+		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	else
+		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+
 	if (!hash) {
 		ret = -ENOMEM;
 		goto out_regex_unlock;
 	}
 
-	if (reset)
-		ftrace_filter_reset(hash);
 	if (buf && !ftrace_match_records(hash, buf, len)) {
 		ret = -EINVAL;
 		goto out_regex_unlock;
@@ -3630,6 +3885,7 @@
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
@@ -3639,16 +3895,29 @@
 }
 __setup("ftrace_graph_filter=", set_graph_function);
 
-static void __init set_ftrace_early_graph(char *buf)
+static int __init set_graph_notrace_function(char *str)
+{
+	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+	return 1;
+}
+__setup("ftrace_graph_notrace=", set_graph_notrace_function);
+
+static void __init set_ftrace_early_graph(char *buf, int enable)
 {
 	int ret;
 	char *func;
+	unsigned long *table = ftrace_graph_funcs;
+	int *count = &ftrace_graph_count;
+
+	if (!enable) {
+		table = ftrace_graph_notrace_funcs;
+		count = &ftrace_graph_notrace_count;
+	}
 
 	while (buf) {
 		func = strsep(&buf, ",");
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-				      FTRACE_GRAPH_MAX_FUNCS, func);
+		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
 		if (ret)
 			printk(KERN_DEBUG "ftrace: function %s not "
 					  "traceable\n", func);
@@ -3677,7 +3946,9 @@
 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (ftrace_graph_buf[0])
-		set_ftrace_early_graph(ftrace_graph_buf);
+		set_ftrace_early_graph(ftrace_graph_buf, 1);
+	if (ftrace_graph_notrace_buf[0])
+		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 }
 
@@ -3819,7 +4090,12 @@
 		return 0;
 
 	if (ptr == (unsigned long *)1) {
-		seq_printf(m, "#### all functions enabled ####\n");
+		struct ftrace_graph_data *fgd = m->private;
+
+		if (fgd->table == ftrace_graph_funcs)
+			seq_printf(m, "#### all functions enabled ####\n");
+		else
+			seq_printf(m, "#### no functions disabled ####\n");
 		return 0;
 	}
 
@@ -4447,9 +4723,6 @@
 	struct ftrace_ops *op;
 	int bit;
 
-	if (function_trace_stop)
-		return;
-
 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
 	if (bit < 0)
 		return;
@@ -4461,9 +4734,8 @@
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (ftrace_ops_test(op, ip, regs)) {
-			if (WARN_ON(!op->func)) {
-				function_trace_stop = 1;
-				printk("op=%p %pS\n", op, op);
+			if (FTRACE_WARN_ON(!op->func)) {
+				pr_warn("op=%p %pS\n", op, op);
 				goto out;
 			}
 			op->func(ip, parent_ip, op, regs);
@@ -5084,6 +5356,12 @@
 	/* Function graph doesn't use the .func field of global_ops */
 	global_ops.flags |= FTRACE_OPS_FL_STUB;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* Optimize function graph calling (if implemented by arch) */
+	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+		global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
+#endif
+
 	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
@@ -5104,6 +5382,10 @@
 	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
 	global_ops.flags &= ~FTRACE_OPS_FL_STUB;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+		global_ops.trampoline = 0;
+#endif
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
@@ -5183,9 +5465,4 @@
 
 	kfree(ret_stack);
 }
-
-void ftrace_graph_stop(void)
-{
-	ftrace_stop();
-}
 #endif
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ff70271..925f629 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1689,22 +1689,14 @@
 			if (!cpu_buffer->nr_pages_to_update)
 				continue;
 
-			/* The update must run on the CPU that is being updated. */
-			preempt_disable();
-			if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+			/* Can't run something on an offline CPU. */
+			if (!cpu_online(cpu)) {
 				rb_update_pages(cpu_buffer);
 				cpu_buffer->nr_pages_to_update = 0;
 			} else {
-				/*
-				 * Can not disable preemption for schedule_work_on()
-				 * on PREEMPT_RT.
-				 */
-				preempt_enable();
 				schedule_work_on(cpu,
 						&cpu_buffer->update_pages_work);
-				preempt_disable();
 			}
-			preempt_enable();
 		}
 
 		/* wait for all the updates to complete */
@@ -1742,22 +1734,14 @@
 
 		get_online_cpus();
 
-		preempt_disable();
-		/* The update must run on the CPU that is being updated. */
-		if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+		/* Can't run something on an offline CPU. */
+		if (!cpu_online(cpu_id))
 			rb_update_pages(cpu_buffer);
 		else {
-			/*
-			 * Can not disable preemption for schedule_work_on()
-			 * on PREEMPT_RT.
-			 */
-			preempt_enable();
 			schedule_work_on(cpu_id,
 					 &cpu_buffer->update_pages_work);
 			wait_for_completion(&cpu_buffer->update_done);
-			preempt_disable();
 		}
-		preempt_enable();
 
 		cpu_buffer->nr_pages_to_update = 0;
 		put_online_cpus();
@@ -3775,7 +3759,7 @@
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
-	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+	if (iter->head >= rb_page_size(iter->head_page)) {
 		rb_inc_iter(iter);
 		goto again;
 	}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 291397e..8bb80fe 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -937,30 +937,6 @@
 	return ret;
 }
 
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
-{
-	int len;
-	int ret;
-
-	if (!cnt)
-		return 0;
-
-	if (s->len <= s->readpos)
-		return -EBUSY;
-
-	len = s->len - s->readpos;
-	if (cnt > len)
-		cnt = len;
-	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret == cnt)
-		return -EFAULT;
-
-	cnt -= ret;
-
-	s->readpos += cnt;
-	return cnt;
-}
-
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
@@ -3699,6 +3675,7 @@
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
+	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
@@ -4238,10 +4215,9 @@
 }
 
 static ssize_t
-tracing_max_lat_read(struct file *filp, char __user *ubuf,
-		     size_t cnt, loff_t *ppos)
+tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
 {
-	unsigned long *ptr = filp->private_data;
 	char buf[64];
 	int r;
 
@@ -4253,10 +4229,9 @@
 }
 
 static ssize_t
-tracing_max_lat_write(struct file *filp, const char __user *ubuf,
-		      size_t cnt, loff_t *ppos)
+tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
 {
-	unsigned long *ptr = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -4269,6 +4244,52 @@
 	return cnt;
 }
 
+static ssize_t
+tracing_thresh_read(struct file *filp, char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_thresh_write(struct file *filp, const char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	int ret;
+
+	mutex_lock(&trace_types_lock);
+	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+	if (ret < 0)
+		goto out;
+
+	if (tr->current_trace->update_thresh) {
+		ret = tr->current_trace->update_thresh(tr);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = cnt;
+out:
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+static ssize_t
+tracing_max_lat_read(struct file *filp, char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+		      size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+}
+
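A hedged sketch of a tracer wiring up the new callback (tracer and
function names hypothetical):

static int example_update_thresh(struct trace_array *tr)
{
	/* Revalidate any state cached from the old tracing_thresh. */
	return 0;
}

static struct tracer example_tracer __read_mostly = {
	.name		= "example",
	.update_thresh	= example_update_thresh,
};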
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
 	struct trace_array *tr = inode->i_private;
@@ -5170,6 +5191,13 @@
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 
+static const struct file_operations tracing_thresh_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_thresh_read,
+	.write		= tracing_thresh_write,
+	.llseek		= generic_file_llseek,
+};
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -6107,10 +6135,8 @@
 	if (!topts)
 		return;
 
-	for (cnt = 0; topts[cnt].opt; cnt++) {
-		if (topts[cnt].entry)
-			debugfs_remove(topts[cnt].entry);
-	}
+	for (cnt = 0; topts[cnt].opt; cnt++)
+		debugfs_remove(topts[cnt].entry);
 
 	kfree(topts);
 }
@@ -6533,7 +6559,7 @@
 	init_tracer_debugfs(&global_trace, d_tracer);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
-			&tracing_thresh, &tracing_max_lat_fops);
+			&global_trace, &tracing_thresh_fops);
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9258f5a..385391f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -339,6 +339,7 @@
  * @reset: called when one switches to another tracer
  * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
  * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @update_thresh: called when tracing_thresh is updated
  * @open: called when the trace file is opened
  * @pipe_open: called when the trace_pipe file is opened
  * @close: called when the trace file is released
@@ -357,6 +358,7 @@
 	void			(*reset)(struct trace_array *tr);
 	void			(*start)(struct trace_array *tr);
 	void			(*stop)(struct trace_array *tr);
+	int			(*update_thresh)(struct trace_array *tr);
 	void			(*open)(struct trace_iterator *iter);
 	void			(*pipe_open)(struct trace_iterator *iter);
 	void			(*close)(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 5d12bb4..4b9c114 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -30,6 +30,18 @@
 			return ret;
 	}
 
+	/*
+	 * We already checked and allowed the parent to be created,
+	 * so allow children without checking.
+	 */
+	if (p_event->parent)
+		return 0;
+
+	/*
+	 * It's OK to check the current process's (owner) permissions here,
+	 * because the code below is called only via the perf_event_open syscall.
+	 */
+
 	/* The ftrace function trace is allowed only for root. */
 	if (ftrace_event_is_function(tp_event)) {
 		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2de53628..ef06ce7 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -8,6 +8,8 @@
  *
  */
 
+#define pr_fmt(fmt) fmt
+
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
@@ -1491,7 +1493,7 @@
 
 	dir->entry = debugfs_create_dir(name, parent);
 	if (!dir->entry) {
-		pr_warning("Failed to create system directory %s\n", name);
+		pr_warn("Failed to create system directory %s\n", name);
 		__put_system(system);
 		goto out_free;
 	}
@@ -1507,7 +1509,7 @@
 	if (!entry) {
 		kfree(system->filter);
 		system->filter = NULL;
-		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
+		pr_warn("Could not create debugfs '%s/filter' entry\n", name);
 	}
 
 	trace_create_file("enable", 0644, dir->entry, dir,
@@ -1522,8 +1524,7 @@
  out_fail:
 	/* Only print this message if failed on memory allocation */
 	if (!dir || !system)
-		pr_warning("No memory to create event subsystem %s\n",
-			   name);
+		pr_warn("No memory to create event subsystem %s\n", name);
 	return NULL;
 }
 
@@ -1551,8 +1552,7 @@
 	name = ftrace_event_name(call);
 	file->dir = debugfs_create_dir(name, d_events);
 	if (!file->dir) {
-		pr_warning("Could not create debugfs '%s' directory\n",
-			   name);
+		pr_warn("Could not create debugfs '%s' directory\n", name);
 		return -1;
 	}
 
@@ -1575,8 +1575,8 @@
 	if (list_empty(head)) {
 		ret = call->class->define_fields(call);
 		if (ret < 0) {
-			pr_warning("Could not initialize trace point"
-				   " events/%s\n", name);
+			pr_warn("Could not initialize trace point events/%s\n",
+				name);
 			return -1;
 		}
 	}
@@ -1621,7 +1621,6 @@
 		if (file->event_call != call)
 			continue;
 		ftrace_event_enable_disable(file, 0);
-		destroy_preds(file);
 		/*
 		 * The do_for_each_event_file() is
 		 * a double loop. After finding the call for this
@@ -1649,8 +1648,7 @@
 	if (call->class->raw_init) {
 		ret = call->class->raw_init(call);
 		if (ret < 0 && ret != -ENOSYS)
-			pr_warn("Could not initialize trace events/%s\n",
-				name);
+			pr_warn("Could not initialize trace events/%s\n", name);
 	}
 
 	return ret;
@@ -1749,7 +1747,8 @@
 {
 	event_remove(call);
 	trace_destroy_fields(call);
-	destroy_call_preds(call);
+	free_event_filter(call->filter);
+	call->filter = NULL;
 }
 
 static int probe_remove_event_call(struct ftrace_event_call *call)
@@ -1895,8 +1894,8 @@
 	list_for_each_entry(call, &ftrace_events, list) {
 		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
-			pr_warning("Could not create directory for event %s\n",
-				   ftrace_event_name(call));
+			pr_warn("Could not create directory for event %s\n",
+				ftrace_event_name(call));
 	}
 }
 
@@ -2208,8 +2207,8 @@
 	list_for_each_entry(file, &tr->events, list) {
 		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
-			pr_warning("Could not create directory for event %s\n",
-				   ftrace_event_name(file->event_call));
+			pr_warn("Could not create directory for event %s\n",
+				ftrace_event_name(file->event_call));
 	}
 }
 
@@ -2232,8 +2231,8 @@
 
 		ret = __trace_early_add_new_event(call, tr);
 		if (ret < 0)
-			pr_warning("Could not create early event %s\n",
-				   ftrace_event_name(call));
+			pr_warn("Could not create early event %s\n",
+				ftrace_event_name(call));
 	}
 }
 
@@ -2280,13 +2279,13 @@
 	entry = debugfs_create_file("set_event", 0644, parent,
 				    tr, &ftrace_set_event_fops);
 	if (!entry) {
-		pr_warning("Could not create debugfs 'set_event' entry\n");
+		pr_warn("Could not create debugfs 'set_event' entry\n");
 		return -ENOMEM;
 	}
 
 	d_events = debugfs_create_dir("events", parent);
 	if (!d_events) {
-		pr_warning("Could not create debugfs 'events' directory\n");
+		pr_warn("Could not create debugfs 'events' directory\n");
 		return -ENOMEM;
 	}
 
@@ -2462,11 +2461,10 @@
 	entry = debugfs_create_file("available_events", 0444, d_tracer,
 				    tr, &ftrace_avail_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'available_events' entry\n");
+		pr_warn("Could not create debugfs 'available_events' entry\n");
 
 	if (trace_define_common_fields())
-		pr_warning("tracing: Failed to allocate common fields");
+		pr_warn("tracing: Failed to allocate common fields");
 
 	ret = early_event_add_tracer(d_tracer, tr);
 	if (ret)
@@ -2475,7 +2473,7 @@
 #ifdef CONFIG_MODULES
 	ret = register_module_notifier(&trace_module_nb);
 	if (ret)
-		pr_warning("Failed to register trace events module notifier\n");
+		pr_warn("Failed to register trace events module notifier\n");
 #endif
 	return 0;
 }
@@ -2579,7 +2577,7 @@
 		 * it and the self test should not be on.
 		 */
 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
-			pr_warning("Enabled event during self test!\n");
+			pr_warn("Enabled event during self test!\n");
 			WARN_ON_ONCE(1);
 			continue;
 		}
@@ -2607,8 +2605,8 @@
 
 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
 		if (WARN_ON_ONCE(ret)) {
-			pr_warning("error enabling system %s\n",
-				   system->name);
+			pr_warn("error enabling system %s\n",
+				system->name);
 			continue;
 		}
 
@@ -2616,8 +2614,8 @@
 
 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
 		if (WARN_ON_ONCE(ret)) {
-			pr_warning("error disabling system %s\n",
-				   system->name);
+			pr_warn("error disabling system %s\n",
+				system->name);
 			continue;
 		}
 
@@ -2631,7 +2629,7 @@
 
 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error enabling all events\n");
+		pr_warn("error enabling all events\n");
 		return;
 	}
 
@@ -2640,7 +2638,7 @@
 	/* reset sysname */
 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error disabling all events\n");
+		pr_warn("error disabling all events\n");
 		return;
 	}
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 8a86319..7a8c152 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -774,17 +774,12 @@
 	filter->n_preds = 0;
 }
 
-static void call_filter_disable(struct ftrace_event_call *call)
-{
-	call->flags &= ~TRACE_EVENT_FL_FILTERED;
-}
-
 static void filter_disable(struct ftrace_event_file *file)
 {
 	struct ftrace_event_call *call = file->event_call;
 
 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		call_filter_disable(call);
+		call->flags &= ~TRACE_EVENT_FL_FILTERED;
 	else
 		file->flags &= ~FTRACE_EVENT_FL_FILTERED;
 }
@@ -804,32 +799,6 @@
 	__free_filter(filter);
 }
 
-void destroy_call_preds(struct ftrace_event_call *call)
-{
-	__free_filter(call->filter);
-	call->filter = NULL;
-}
-
-static void destroy_file_preds(struct ftrace_event_file *file)
-{
-	__free_filter(file->filter);
-	file->filter = NULL;
-}
-
-/*
- * Called when destroying the ftrace_event_file.
- * The file is being freed, so we do not need to worry about
- * the file being currently used. This is for module code removing
- * the tracepoints from within it.
- */
-void destroy_preds(struct ftrace_event_file *file)
-{
-	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		destroy_call_preds(file->event_call);
-	else
-		destroy_file_preds(file);
-}
-
 static struct event_filter *__alloc_filter(void)
 {
 	struct event_filter *filter;
@@ -873,17 +842,14 @@
 		remove_filter_string(file->filter);
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system,
+static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
 					struct trace_array *tr)
 {
 	struct ftrace_event_file *file;
-	struct ftrace_event_call *call;
 
 	list_for_each_entry(file, &tr->events, list) {
-		call = file->event_call;
-		if (strcmp(call->class->system, system->name) != 0)
+		if (file->system != dir)
 			continue;
-
 		__remove_filter(file);
 	}
 }
@@ -901,15 +867,13 @@
 	}
 }
 
-static void filter_free_subsystem_filters(struct event_subsystem *system,
+static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir,
 					  struct trace_array *tr)
 {
 	struct ftrace_event_file *file;
-	struct ftrace_event_call *call;
 
 	list_for_each_entry(file, &tr->events, list) {
-		call = file->event_call;
-		if (strcmp(call->class->system, system->name) != 0)
+		if (file->system != dir)
 			continue;
 		__free_subsystem_filter(file);
 	}
@@ -1582,7 +1546,6 @@
 static int replace_preds(struct ftrace_event_call *call,
 			 struct event_filter *filter,
 			 struct filter_parse_state *ps,
-			 char *filter_string,
 			 bool dry_run)
 {
 	char *operand1 = NULL, *operand2 = NULL;
@@ -1755,13 +1718,12 @@
 	struct event_filter	*filter;
 };
 
-static int replace_system_preds(struct event_subsystem *system,
+static int replace_system_preds(struct ftrace_subsystem_dir *dir,
 				struct trace_array *tr,
 				struct filter_parse_state *ps,
 				char *filter_string)
 {
 	struct ftrace_event_file *file;
-	struct ftrace_event_call *call;
 	struct filter_list *filter_item;
 	struct filter_list *tmp;
 	LIST_HEAD(filter_list);
@@ -1769,15 +1731,14 @@
 	int err;
 
 	list_for_each_entry(file, &tr->events, list) {
-		call = file->event_call;
-		if (strcmp(call->class->system, system->name) != 0)
+		if (file->system != dir)
 			continue;
 
 		/*
 		 * Try to see if the filter can be applied
 		 *  (filter arg is ignored on dry_run)
 		 */
-		err = replace_preds(call, NULL, ps, filter_string, true);
+		err = replace_preds(file->event_call, NULL, ps, true);
 		if (err)
 			event_set_no_set_filter_flag(file);
 		else
@@ -1787,9 +1748,7 @@
 	list_for_each_entry(file, &tr->events, list) {
 		struct event_filter *filter;
 
-		call = file->event_call;
-
-		if (strcmp(call->class->system, system->name) != 0)
+		if (file->system != dir)
 			continue;
 
 		if (event_no_set_filter_flag(file))
@@ -1811,7 +1770,7 @@
 		if (err)
 			goto fail_mem;
 
-		err = replace_preds(call, filter, ps, filter_string, false);
+		err = replace_preds(file->event_call, filter, ps, false);
 		if (err) {
 			filter_disable(file);
 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
@@ -1933,7 +1892,7 @@
 
 	err = create_filter_start(filter_str, set_str, &ps, &filter);
 	if (!err) {
-		err = replace_preds(call, filter, ps, filter_str, false);
+		err = replace_preds(call, filter, ps, false);
 		if (err && set_str)
 			append_filter_err(ps, filter);
 	}
@@ -1959,7 +1918,7 @@
  * Identical to create_filter() except that it creates a subsystem filter
  * and always remembers @filter_str.
  */
-static int create_system_filter(struct event_subsystem *system,
+static int create_system_filter(struct ftrace_subsystem_dir *dir,
 				struct trace_array *tr,
 				char *filter_str, struct event_filter **filterp)
 {
@@ -1969,7 +1928,7 @@
 
 	err = create_filter_start(filter_str, true, &ps, &filter);
 	if (!err) {
-		err = replace_system_preds(system, tr, ps, filter_str);
+		err = replace_system_preds(dir, tr, ps, filter_str);
 		if (!err) {
 			/* System filters just show a default message */
 			kfree(filter->filter_string);
@@ -2053,18 +2012,18 @@
 	}
 
 	if (!strcmp(strstrip(filter_string), "0")) {
-		filter_free_subsystem_preds(system, tr);
+		filter_free_subsystem_preds(dir, tr);
 		remove_filter_string(system->filter);
 		filter = system->filter;
 		system->filter = NULL;
 		/* Ensure all filters are no longer used */
 		synchronize_sched();
-		filter_free_subsystem_filters(system, tr);
+		filter_free_subsystem_filters(dir, tr);
 		__free_filter(filter);
 		goto out_unlock;
 	}
 
-	err = create_system_filter(system, tr, filter_string, &filter);
+	err = create_system_filter(dir, tr, filter_string, &filter);
 	if (filter) {
 		/*
 		 * No event actually uses the system filter
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4de3e57..f0a0c98 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -15,6 +15,33 @@
 #include "trace.h"
 #include "trace_output.h"
 
+static bool kill_ftrace_graph;
+
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * function graph tracing. This function is called by the critical
+ * paths of function graph tracing to keep those paths from doing
+ * any more harm.
+ */
+bool ftrace_graph_is_dead(void)
+{
+	return kill_ftrace_graph;
+}
+
+/**
+ * ftrace_graph_stop - permanently disable function graph tracing
+ *
+ * In case of an error in function graph tracing, this is called
+ * to try to keep function graph tracing from causing any more harm.
+ * Usually this is pretty severe and this is called to try to at least
+ * get a warning out to the user.
+ */
+void ftrace_graph_stop(void)
+{
+	kill_ftrace_graph = true;
+}
+
 /* When set, irq functions will be ignored */
 static int ftrace_graph_skip_irqs;
 
@@ -92,6 +119,9 @@
 	unsigned long long calltime;
 	int index;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return -EBUSY;
+
 	if (!current->ret_stack)
 		return -EBUSY;
 
@@ -323,7 +353,7 @@
 	return ret;
 }
 
-int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
 {
 	if (tracing_thresh)
 		return 1;
@@ -412,7 +442,7 @@
 	smp_mb();
 }
 
-void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
 	if (tracing_thresh &&
 	    (trace->rettime - trace->calltime < tracing_thresh))
@@ -445,6 +475,12 @@
 	unregister_ftrace_graph();
 }
 
+static int graph_trace_update_thresh(struct trace_array *tr)
+{
+	graph_trace_reset(tr);
+	return graph_trace_init(tr);
+}
+
 static int max_bytes_for_cpu;
 
 static enum print_line_t
@@ -1399,7 +1435,7 @@
 	seq_printf(s, "               |   |   |   |\n");
 }
 
-void print_graph_headers(struct seq_file *s)
+static void print_graph_headers(struct seq_file *s)
 {
 	print_graph_headers_flags(s, tracer_flags.val);
 }
@@ -1495,6 +1531,7 @@
 
 static struct tracer graph_trace __tracer_data = {
 	.name		= "function_graph",
+	.update_thresh	= graph_trace_update_thresh,
 	.open		= graph_trace_open,
 	.pipe_open	= graph_trace_open,
 	.close		= graph_trace_close,
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f3dad80..c6977d5 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -20,23 +20,6 @@
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
-int trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-	int ret;
-
-	ret = seq_write(m, s->buffer, len);
-
-	/*
-	 * Only reset this buffer if we successfully wrote to the
-	 * seq_file buffer.
-	 */
-	if (!ret)
-		trace_seq_init(s);
-
-	return ret;
-}
-
 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -85,257 +68,6 @@
 	return TRACE_TYPE_HANDLED;
 }
 
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	va_list ap;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	va_start(ap, fmt);
-	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
-	va_end(ap);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->len += ret;
-
-	return 1;
-}
-EXPORT_SYMBOL_GPL(trace_seq_printf);
-
-/**
- * trace_seq_bitmask - put a list of longs as a bitmask print output
- * @s:		trace sequence descriptor
- * @maskp:	points to an array of unsigned longs that represent a bitmask
- * @nmaskbits:	The number of bits that are valid in @maskp
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
- *
- * Writes a ASCII representation of a bitmask string into @s.
- */
-int
-trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
-		  int nmaskbits)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits);
-	s->len += ret;
-
-	return 1;
-}
-EXPORT_SYMBOL_GPL(trace_seq_bitmask);
-
-/**
- * trace_seq_vprintf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	ret = vsnprintf(s->buffer + s->len, len, fmt, args);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->len += ret;
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_seq_vprintf);
-
-int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->len += ret;
-
-	return len;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-int trace_seq_puts(struct trace_seq *s, const char *str)
-{
-	int len = strlen(str);
-
-	if (s->full)
-		return 0;
-
-	if (len > ((PAGE_SIZE - 1) - s->len)) {
-		s->full = 1;
-		return 0;
-	}
-
-	memcpy(s->buffer + s->len, str, len);
-	s->len += len;
-
-	return len;
-}
-
-int trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
-	if (s->full)
-		return 0;
-
-	if (s->len >= (PAGE_SIZE - 1)) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->buffer[s->len++] = c;
-
-	return 1;
-}
-EXPORT_SYMBOL(trace_seq_putc);
-
-int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
-{
-	if (s->full)
-		return 0;
-
-	if (len > ((PAGE_SIZE - 1) - s->len)) {
-		s->full = 1;
-		return 0;
-	}
-
-	memcpy(s->buffer + s->len, mem, len);
-	s->len += len;
-
-	return len;
-}
-
-int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
-{
-	unsigned char hex[HEX_CHARS];
-	const unsigned char *data = mem;
-	int i, j;
-
-	if (s->full)
-		return 0;
-
-#ifdef __BIG_ENDIAN
-	for (i = 0, j = 0; i < len; i++) {
-#else
-	for (i = len-1, j = 0; i >= 0; i--) {
-#endif
-		hex[j++] = hex_asc_hi(data[i]);
-		hex[j++] = hex_asc_lo(data[i]);
-	}
-	hex[j++] = ' ';
-
-	return trace_seq_putmem(s, hex, j);
-}
-
-void *trace_seq_reserve(struct trace_seq *s, size_t len)
-{
-	void *ret;
-
-	if (s->full)
-		return NULL;
-
-	if (len > ((PAGE_SIZE - 1) - s->len)) {
-		s->full = 1;
-		return NULL;
-	}
-
-	ret = s->buffer + s->len;
-	s->len += len;
-
-	return ret;
-}
-
-int trace_seq_path(struct trace_seq *s, const struct path *path)
-{
-	unsigned char *p;
-
-	if (s->full)
-		return 0;
-
-	if (s->len >= (PAGE_SIZE - 1)) {
-		s->full = 1;
-		return 0;
-	}
-
-	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-	if (!IS_ERR(p)) {
-		p = mangle_path(s->buffer + s->len, p, "\n");
-		if (p) {
-			s->len = p - s->buffer;
-			return 1;
-		}
-	} else {
-		s->buffer[s->len++] = '?';
-		return 1;
-	}
-
-	s->full = 1;
-	return 0;
-}
-
 const char *
 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 		       unsigned long flags,
@@ -343,7 +75,7 @@
 {
 	unsigned long mask;
 	const char *str;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	int i, first = 1;
 
 	for (i = 0;  flag_array[i].name && flags; i++) {
@@ -379,7 +111,7 @@
 			 const struct trace_print_flags *symbol_array)
 {
 	int i;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	for (i = 0;  symbol_array[i].name; i++) {
 
@@ -390,7 +122,7 @@
 		break;
 	}
 
-	if (ret == (const char *)(p->buffer + p->len))
+	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
 		trace_seq_printf(p, "0x%lx", val);
 		
 	trace_seq_putc(p, 0);
@@ -405,7 +137,7 @@
 			 const struct trace_print_flags_u64 *symbol_array)
 {
 	int i;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	for (i = 0;  symbol_array[i].name; i++) {
 
@@ -416,7 +148,7 @@
 		break;
 	}
 
-	if (ret == (const char *)(p->buffer + p->len))
+	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
 		trace_seq_printf(p, "0x%llx", val);
 
 	trace_seq_putc(p, 0);
@@ -430,7 +162,7 @@
 ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
 			 unsigned int bitmask_size)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
 	trace_seq_putc(p, 0);
@@ -443,7 +175,7 @@
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
 	int i;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	for (i = 0; i < buf_len; i++)
 		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 127a9d8..80b25b5 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -35,9 +35,6 @@
 extern int __unregister_ftrace_event(struct trace_event *event);
 extern struct rw_semaphore trace_event_sem;
 
-#define MAX_MEMHEX_BYTES	8
-#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
-
 #define SEQ_PUT_FIELD_RET(s, x)				\
 do {							\
 	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
@@ -46,7 +43,6 @@
 
 #define SEQ_PUT_HEX_FIELD_RET(s, x)			\
 do {							\
-	BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);	\
 	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
 		return TRACE_TYPE_PARTIAL_LINE;		\
 } while (0)
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
new file mode 100644
index 0000000..1f24ed9
--- /dev/null
+++ b/kernel/trace/trace_seq.c
@@ -0,0 +1,428 @@
+/*
+ * trace_seq.c
+ *
+ * Copyright (C) 2008-2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * The trace_seq is a handy tool that allows you to pass a descriptor around
+ * to a buffer that other functions can write to. It is similar to the
+ * seq_file functionality but has some differences.
+ *
+ * To use it, the trace_seq must be initialized with trace_seq_init().
+ * This will set up the counters within the descriptor. You can call
+ * trace_seq_init() more than once to reset the trace_seq to start
+ * from scratch.
+ *
+ * The buffer size is currently PAGE_SIZE, although it may become dynamic
+ * in the future.
+ *
+ * A write to the buffer will either succeed or fail. That is, unlike
+ * sprintf() there will not be a partial write (well, it may write into
+ * the buffer but it won't update the pointers). This allows users to
+ * try to write something into the trace_seq buffer and if it fails
+ * they can flush it and try again.
+ *
+ */
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/trace_seq.h>
+
+/* How much buffer is left on the trace_seq? */
+#define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len)
+
+/* How much buffer is written? */
+#define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1))
+
+/**
+ * trace_print_seq - move the contents of trace_seq into a seq_file
+ * @m: the seq_file descriptor that is the destination
+ * @s: the trace_seq descriptor that is the source.
+ *
+ * Returns 0 on success and non-zero on error. If it succeeds in
+ * writing to the seq_file it will reset the trace_seq; otherwise
+ * it does not modify the trace_seq, to let the caller try again.
+ */
+int trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+	unsigned int len = TRACE_SEQ_BUF_USED(s);
+	int ret;
+
+	ret = seq_write(m, s->buffer, len);
+
+	/*
+	 * Only reset this buffer if we successfully wrote to the
+	 * seq_file buffer. This lets the caller try again or
+	 * do something else with the contents.
+	 */
+	if (!ret)
+		trace_seq_init(s);
+
+	return ret;
+}
+
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf() is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns 1 if all the contents were successfully written to
+ *   the buffer.
+ * Returns 0 if the length to write is bigger than the
+ *   reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	va_list ap;
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	va_start(ap, fmt);
+	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+	va_end(ap);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->len += ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_printf);
+
+/**
+ * trace_seq_bitmask - write a bitmask array in its ASCII representation
+ * @s:		trace sequence descriptor
+ * @maskp:	points to an array of unsigned longs that represent a bitmask
+ * @nmaskbits:	The number of bits that are valid in @maskp
+ *
+ * Writes an ASCII representation of a bitmask string into @s.
+ *
+ * Returns 1 if all the contents were successfully written to
+ *   the buffer.
+ * Returns 0 if the length to write is bigger than the
+ *   reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+		      int nmaskbits)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits);
+	s->len += ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bitmask);
+
+/**
+ * trace_seq_vprintf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = vsnprintf(s->buffer + s->len, len, fmt, args);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->len += ret;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_vprintf);
+
+/**
+ * trace_seq_bprintf - Write the printf string from binary arguments
+ * @s: trace sequence descriptor
+ * @fmt: The format string for the @binary arguments
+ * @binary: The binary arguments for @fmt.
+ *
+ * When recording in a fast path, a printf may be recorded with just
+ * saving the format and the arguments as they were passed to the
+ * function, instead of wasting cycles converting the arguments into
+ * ASCII characters. Instead, the arguments are saved in a 32-bit
+ * word array that is defined by the format string constraints.
+ *
+ * This function will take the format and the binary array and finish
+ * the conversion into the ASCII string within the buffer.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->len += ret;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bprintf);
+
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+	unsigned int len = strlen(str);
+
+	if (s->full)
+		return 0;
+
+	if (len > TRACE_SEQ_BUF_LEFT(s)) {
+		s->full = 1;
+		return 0;
+	}
+
+	memcpy(s->buffer + s->len, str, len);
+	s->len += len;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_puts);
+
+/**
+ * trace_seq_putc - trace sequence printing of simple character
+ * @s: trace sequence descriptor
+ * @c: simple character to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple character
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+	if (s->full)
+		return 0;
+
+	if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->buffer[s->len++] = c;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putc);
+
+/**
+ * trace_seq_putmem - write raw data into the trace_seq buffer
+ * @s: trace sequence descriptor
+ * @mem: The raw memory to copy into the buffer
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * There may be cases where raw memory needs to be written into the
+ * buffer and a strcpy() would not work. Using this function allows
+ * for such cases.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
+{
+	if (s->full)
+		return 0;
+
+	if (len > TRACE_SEQ_BUF_LEFT(s)) {
+		s->full = 1;
+		return 0;
+	}
+
+	memcpy(s->buffer + s->len, mem, len);
+	s->len += len;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putmem);
+
+#define MAX_MEMHEX_BYTES	8U
+#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
+
+/**
+ * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex
+ * @s: trace sequence descriptor
+ * @mem: The raw memory to write its hex ASCII representation of
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * This is similar to trace_seq_putmem() except instead of just copying the
+ * raw memory into the buffer it writes its ASCII representation of it
+ * in hex characters.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+			 unsigned int len)
+{
+	unsigned char hex[HEX_CHARS];
+	const unsigned char *data = mem;
+	unsigned int start_len;
+	int i, j;
+	int cnt = 0;
+
+	if (s->full)
+		return 0;
+
+	while (len) {
+		start_len = min(len, HEX_CHARS - 1);
+#ifdef __BIG_ENDIAN
+		for (i = 0, j = 0; i < start_len; i++) {
+#else
+		for (i = start_len-1, j = 0; i >= 0; i--) {
+#endif
+			hex[j++] = hex_asc_hi(data[i]);
+			hex[j++] = hex_asc_lo(data[i]);
+		}
+		if (WARN_ON_ONCE(j == 0 || j/2 > len))
+			break;
+
+		/* j increments twice per loop */
+		len -= j / 2;
+		hex[j++] = ' ';
+
+		cnt += trace_seq_putmem(s, hex, j);
+	}
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
+
+/**
+ * trace_seq_path - copy a path into the sequence buffer
+ * @s: trace sequence descriptor
+ * @path: path to write into the sequence buffer.
+ *
+ * Write a path name into the sequence buffer.
+ *
+ * Returns 1 if all the contents were successfully written to
+ *   the buffer.
+ * Returns 0 if the length to write is bigger than the
+ *   reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_path(struct trace_seq *s, const struct path *path)
+{
+	unsigned char *p;
+
+	if (s->full)
+		return 0;
+
+	if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+		s->full = 1;
+		return 0;
+	}
+
+	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+	if (!IS_ERR(p)) {
+		p = mangle_path(s->buffer + s->len, p, "\n");
+		if (p) {
+			s->len = p - s->buffer;
+			return 1;
+		}
+	} else {
+		s->buffer[s->len++] = '?';
+		return 1;
+	}
+
+	s->full = 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(trace_seq_path);
+
+/**
+ * trace_seq_to_user - copy the sequence buffer to user space
+ * @s: trace sequence descriptor
+ * @ubuf: The userspace memory location to copy to
+ * @cnt: The amount to copy
+ *
+ * Copies the sequence buffer into the userspace memory pointed to
+ * by @ubuf. It starts from the last read position (@s->readpos)
+ * and writes up to @cnt characters or until it reaches the end of
+ * the content in the buffer (@s->len), whichever comes first.
+ *
+ * On success, it returns the number of bytes copied.
+ *
+ * On failure it returns -EBUSY if all of the content in the
+ * sequence has already been read, which includes the case of an
+ * empty sequence (@s->len == @s->readpos).
+ *
+ * Returns -EFAULT if the copy to userspace fails.
+ */
+int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
+{
+	int len;
+	int ret;
+
+	if (!cnt)
+		return 0;
+
+	if (s->len <= s->readpos)
+		return -EBUSY;
+
+	len = s->len - s->readpos;
+	if (cnt > len)
+		cnt = len;
+	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+	if (ret == cnt)
+		return -EFAULT;
+
+	cnt -= ret;
+
+	s->readpos += cnt;
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(trace_seq_to_user);
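
The file header above describes the all-or-nothing write semantics: each
write either fully succeeds or leaves the descriptor untouched. A minimal
sketch of a consumer, assuming kernel context; the function and variable
names are illustrative, not part of this patch:

    /* static: trace_seq embeds a PAGE_SIZE buffer, too big for the stack */
    static struct trace_seq example_seq;

    static void example_dump(struct seq_file *m)
    {
            struct trace_seq *s = &example_seq;

            trace_seq_init(s);

            /* Each call either writes everything or leaves @s untouched. */
            if (!trace_seq_printf(s, "pid=%d comm=%s\n",
                                  current->pid, current->comm))
                    return;         /* buffer full; nothing was written */

            trace_seq_puts(s, "done\n");

            /* Flush to the seq_file; on success the trace_seq is reset. */
            trace_print_seq(m, s);
    }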
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 3c9b97e..33ff6a2 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -265,7 +265,6 @@
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
 	init_trace_uprobe_filter(&tu->filter);
-	tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
 	return tu;
 
 error:
@@ -1292,7 +1291,7 @@
 		kfree(call->print_fmt);
 		return -ENODEV;
 	}
-	call->flags = 0;
+
 	call->class->reg = trace_uprobe_register;
 	call->data = tu;
 	ret = trace_add_event_call(call);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 35974ac..5dbe22a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -265,7 +265,6 @@
 
 static struct kmem_cache *pwq_cache;
 
-static int wq_numa_tbl_len;		/* highest possible NUMA node id + 1 */
 static cpumask_var_t *wq_numa_possible_cpumask;
 					/* possible CPUs of each node */
 
@@ -758,13 +757,6 @@
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
-	/*
-	 * nr_idle and idle_list may disagree if idle rebinding is in
-	 * progress.  Never return %true if idle_list is empty.
-	 */
-	if (list_empty(&pool->idle_list))
-		return false;
-
 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
@@ -850,7 +842,7 @@
 	pool = worker->pool;
 
 	/* this can only happen on the local cpu */
-	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+	if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
 		return NULL;
 
 	/*
@@ -874,35 +866,22 @@
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
- * @wakeup: wakeup an idle worker if necessary
  *
- * Set @flags in @worker->flags and adjust nr_running accordingly.  If
- * nr_running becomes zero and @wakeup is %true, an idle worker is
- * woken up.
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock)
  */
-static inline void worker_set_flags(struct worker *worker, unsigned int flags,
-				    bool wakeup)
+static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
 	struct worker_pool *pool = worker->pool;
 
 	WARN_ON_ONCE(worker->task != current);
 
-	/*
-	 * If transitioning into NOT_RUNNING, adjust nr_running and
-	 * wake up an idle worker as necessary if requested by
-	 * @wakeup.
-	 */
+	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 	if ((flags & WORKER_NOT_RUNNING) &&
 	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		if (wakeup) {
-			if (atomic_dec_and_test(&pool->nr_running) &&
-			    !list_empty(&pool->worklist))
-				wake_up_worker(pool);
-		} else
-			atomic_dec(&pool->nr_running);
+		atomic_dec(&pool->nr_running);
 	}
 
 	worker->flags |= flags;
@@ -1232,7 +1211,7 @@
 			pwq_activate_delayed_work(work);
 
 		list_del_init(&work->entry);
-		pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
+		pwq_dec_nr_in_flight(pwq, get_work_color(work));
 
 		/* work->data points to pwq iff queued, point to pool */
 		set_work_pool_and_keep_pending(work, pool->id);
@@ -1560,7 +1539,7 @@
 			 (worker->hentry.next || worker->hentry.pprev)))
 		return;
 
-	/* can't use worker_set_flags(), also called from start_worker() */
+	/* can't use worker_set_flags(), also called from create_worker() */
 	worker->flags |= WORKER_IDLE;
 	pool->nr_idle++;
 	worker->last_active = jiffies;
@@ -1602,11 +1581,11 @@
 	list_del_init(&worker->entry);
 }
 
-static struct worker *alloc_worker(void)
+static struct worker *alloc_worker(int node)
 {
 	struct worker *worker;
 
-	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
 	if (worker) {
 		INIT_LIST_HEAD(&worker->entry);
 		INIT_LIST_HEAD(&worker->scheduled);
@@ -1670,6 +1649,9 @@
 		detach_completion = pool->detach_completion;
 	mutex_unlock(&pool->attach_mutex);
 
+	/* clear leftover flags without pool->lock after it is detached */
+	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
+
 	if (detach_completion)
 		complete(detach_completion);
 }
@@ -1678,8 +1660,7 @@
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
  *
- * Create a new worker which is attached to @pool.  The new worker must be
- * started by start_worker().
+ * Create and start a new worker which is attached to @pool.
  *
  * CONTEXT:
  * Might sleep.  Does GFP_KERNEL allocations.
@@ -1698,7 +1679,7 @@
 	if (id < 0)
 		goto fail;
 
-	worker = alloc_worker();
+	worker = alloc_worker(pool->node);
 	if (!worker)
 		goto fail;
 
@@ -1724,6 +1705,13 @@
 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
 
+	/* start the newly created worker */
+	spin_lock_irq(&pool->lock);
+	worker->pool->nr_workers++;
+	worker_enter_idle(worker);
+	wake_up_process(worker->task);
+	spin_unlock_irq(&pool->lock);
+
 	return worker;
 
 fail:
@@ -1734,44 +1722,6 @@
 }
 
 /**
- * start_worker - start a newly created worker
- * @worker: worker to start
- *
- * Make the pool aware of @worker and start it.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void start_worker(struct worker *worker)
-{
-	worker->pool->nr_workers++;
-	worker_enter_idle(worker);
-	wake_up_process(worker->task);
-}
-
-/**
- * create_and_start_worker - create and start a worker for a pool
- * @pool: the target pool
- *
- * Grab the managership of @pool and create and start a new worker for it.
- *
- * Return: 0 on success. A negative error code otherwise.
- */
-static int create_and_start_worker(struct worker_pool *pool)
-{
-	struct worker *worker;
-
-	worker = create_worker(pool);
-	if (worker) {
-		spin_lock_irq(&pool->lock);
-		start_worker(worker);
-		spin_unlock_irq(&pool->lock);
-	}
-
-	return worker ? 0 : -ENOMEM;
-}
-
-/**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
@@ -1909,23 +1859,10 @@
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
 	while (true) {
-		struct worker *worker;
-
-		worker = create_worker(pool);
-		if (worker) {
-			del_timer_sync(&pool->mayday_timer);
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			if (WARN_ON_ONCE(need_to_create_worker(pool)))
-				goto restart;
-			return true;
-		}
-
-		if (!need_to_create_worker(pool))
+		if (create_worker(pool) || !need_to_create_worker(pool))
 			break;
 
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(CREATE_COOLDOWN);
+		schedule_timeout_interruptible(CREATE_COOLDOWN);
 
 		if (!need_to_create_worker(pool))
 			break;
@@ -1933,6 +1870,11 @@
 
 	del_timer_sync(&pool->mayday_timer);
 	spin_lock_irq(&pool->lock);
+	/*
+	 * This is necessary even after a new worker was just successfully
+	 * created, as @pool->lock was dropped and the new worker might have
+	 * already become busy.
+	 */
 	if (need_to_create_worker(pool))
 		goto restart;
 	return true;
@@ -2020,13 +1962,8 @@
 
 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 #endif
-	/*
-	 * Ensure we're on the correct CPU.  DISASSOCIATED test is
-	 * necessary to avoid spurious warnings from rescuers servicing the
-	 * unbound or a disassociated pool.
-	 */
-	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-		     !(pool->flags & POOL_DISASSOCIATED) &&
+	/* ensure we're on the correct CPU */
+	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
 		     raw_smp_processor_id() != pool->cpu);
 
 	/*
@@ -2052,17 +1989,22 @@
 	list_del_init(&work->entry);
 
 	/*
-	 * CPU intensive works don't participate in concurrency
-	 * management.  They're the scheduler's responsibility.
+	 * CPU intensive works don't participate in concurrency management.
+	 * They're the scheduler's responsibility.  This takes @worker out
+	 * of concurrency management and the next code block will chain
+	 * execution of the pending work items.
 	 */
 	if (unlikely(cpu_intensive))
-		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/*
-	 * Unbound pool isn't concurrency managed and work items should be
-	 * executed ASAP.  Wake up another worker if necessary.
+	 * Wake up another worker if necessary.  The condition is always
+	 * false for normal per-cpu workers since nr_running would always
+	 * be >= 1 at this point.  This is used to chain execution of the
+	 * pending work items for WORKER_NOT_RUNNING workers such as the
+	 * UNBOUND and CPU_INTENSIVE ones.
 	 */
-	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
+	if (need_more_worker(pool))
 		wake_up_worker(pool);
 
 	/*
@@ -2218,7 +2160,7 @@
 		}
 	} while (keep_working(pool));
 
-	worker_set_flags(worker, WORKER_PREP, false);
+	worker_set_flags(worker, WORKER_PREP);
 sleep:
 	/*
 	 * pool->lock is held and there's no work to process and no need to
@@ -2311,29 +2253,27 @@
 				move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
-		spin_unlock_irq(&pool->lock);
-
-		worker_detach_from_pool(rescuer, pool);
-
-		spin_lock_irq(&pool->lock);
 
 		/*
 		 * Put the reference grabbed by send_mayday().  @pool won't
-		 * go away while we're holding its lock.
+		 * go away while we're still attached to it.
 		 */
 		put_pwq(pwq);
 
 		/*
-		 * Leave this pool.  If keep_working() is %true, notify a
+		 * Leave this pool.  If need_more_worker() is %true, notify a
 		 * regular worker; otherwise, we end up with 0 concurrency
 		 * and stalling the execution.
 		 */
-		if (keep_working(pool))
+		if (need_more_worker(pool))
 			wake_up_worker(pool);
 
 		rescuer->pool = NULL;
-		spin_unlock(&pool->lock);
-		spin_lock(&wq_mayday_lock);
+		spin_unlock_irq(&pool->lock);
+
+		worker_detach_from_pool(rescuer, pool);
+
+		spin_lock_irq(&wq_mayday_lock);
 	}
 
 	spin_unlock_irq(&wq_mayday_lock);
@@ -3458,7 +3398,7 @@
 		return;
 
 	/* sanity checks */
-	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
+	if (WARN_ON(!(pool->cpu < 0)) ||
 	    WARN_ON(!list_empty(&pool->worklist)))
 		return;
 
@@ -3524,7 +3464,7 @@
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
 		if (wqattrs_equal(pool->attrs, attrs)) {
 			pool->refcnt++;
-			goto out_unlock;
+			return pool;
 		}
 	}
 
@@ -3557,12 +3497,12 @@
 		goto fail;
 
 	/* create and start the initial worker */
-	if (create_and_start_worker(pool) < 0)
+	if (!create_worker(pool))
 		goto fail;
 
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
-out_unlock:
+
 	return pool;
 fail:
 	if (pool)
@@ -3591,11 +3531,6 @@
 	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
 		return;
 
-	/*
-	 * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
-	 * necessary on release but do it anyway.  It's easier to verify
-	 * and consistent with the linking path.
-	 */
 	mutex_lock(&wq->mutex);
 	list_del_rcu(&pwq->pwqs_node);
 	is_last = list_empty(&wq->pwqs);
@@ -3692,10 +3627,7 @@
 	if (!list_empty(&pwq->pwqs_node))
 		return;
 
-	/*
-	 * Set the matching work_color.  This is synchronized with
-	 * wq->mutex to avoid confusing flush_workqueue().
-	 */
+	/* set the matching work_color */
 	pwq->work_color = wq->work_color;
 
 	/* sync max_active to the current setting */
@@ -3832,7 +3764,7 @@
 	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
 		return -EINVAL;
 
-	pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
+	pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
 	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	if (!pwq_tbl || !new_attrs || !tmp_attrs)
@@ -4080,7 +4012,7 @@
 
 	/* allocate wq and format name */
 	if (flags & WQ_UNBOUND)
-		tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
+		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
 
 	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
 	if (!wq)
@@ -4122,7 +4054,7 @@
 	if (flags & WQ_MEM_RECLAIM) {
 		struct worker *rescuer;
 
-		rescuer = alloc_worker();
+		rescuer = alloc_worker(NUMA_NO_NODE);
 		if (!rescuer)
 			goto err_destroy;
 
@@ -4470,8 +4402,6 @@
 	struct worker *worker;
 
 	for_each_cpu_worker_pool(pool, cpu) {
-		WARN_ON_ONCE(cpu != smp_processor_id());
-
 		mutex_lock(&pool->attach_mutex);
 		spin_lock_irq(&pool->lock);
 
@@ -4543,6 +4473,7 @@
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
 		unsigned int worker_flags = worker->flags;
@@ -4632,7 +4563,7 @@
 		for_each_cpu_worker_pool(pool, cpu) {
 			if (pool->nr_workers)
 				continue;
-			if (create_and_start_worker(pool) < 0)
+			if (!create_worker(pool))
 				return NOTIFY_BAD;
 		}
 		break;
@@ -4644,15 +4575,10 @@
 		for_each_pool(pool, pi) {
 			mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu) {
-				spin_lock_irq(&pool->lock);
-				pool->flags &= ~POOL_DISASSOCIATED;
-				spin_unlock_irq(&pool->lock);
-
+			if (pool->cpu == cpu)
 				rebind_workers(pool);
-			} else if (pool->cpu < 0) {
+			else if (pool->cpu < 0)
 				restore_unbound_workers_cpumask(pool, cpu);
-			}
 
 			mutex_unlock(&pool->attach_mutex);
 		}
@@ -4856,10 +4782,6 @@
 	cpumask_var_t *tbl;
 	int node, cpu;
 
-	/* determine NUMA pwq table len - highest node id + 1 */
-	for_each_node(node)
-		wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
-
 	if (num_possible_nodes() <= 1)
 		return;
 
@@ -4876,7 +4798,7 @@
 	 * available.  Build one from cpu_to_node() which should have been
 	 * fully initialized by now.
 	 */
-	tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
+	tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
 	BUG_ON(!tbl);
 
 	for_each_node(node)
@@ -4936,7 +4858,7 @@
 
 		for_each_cpu_worker_pool(pool, cpu) {
 			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(create_and_start_worker(pool) < 0);
+			BUG_ON(!create_worker(pool));
 		}
 	}
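
With start_worker() and create_and_start_worker() folded into
create_worker(), callers get a created, attached and already-started worker
(or NULL) from a single call. A sketch of the resulting pattern, mirroring
the hotplug and early-init callers above:

    /* create_worker() now attaches and starts the worker itself. */
    for_each_cpu_worker_pool(pool, cpu) {
            if (pool->nr_workers)
                    continue;
            if (!create_worker(pool))       /* was: create_and_start_worker() < 0 */
                    return NOTIFY_BAD;
    }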
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7a638aa..901096d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -835,7 +835,7 @@
 
 config RT_MUTEX_TESTER
 	bool "Built-in scriptable tester for rt-mutexes"
-	depends on DEBUG_KERNEL && RT_MUTEXES
+	depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
 	help
 	  This option enables a rt-mutex tester.
 
@@ -1131,20 +1131,6 @@
 
 	 Say N if you are unsure.
 
-config PROVE_RCU_DELAY
-	bool "RCU debugging: preemptible RCU race provocation"
-	depends on DEBUG_KERNEL && PREEMPT_RCU
-	default n
-	help
-	 There is a class of races that involve an unlikely preemption
-	 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-	 been set to INT_MIN.  This feature inserts a delay at that
-	 point to increase the probability of these races.
-
-	 Say Y to increase probability of preemption of __rcu_read_unlock().
-
-	 Say N if you are unsure.
-
 config SPARSE_RCU_POINTER
 	bool "RCU debugging: sparse-based checks for pointer usage"
 	default n
diff --git a/lib/lockref.c b/lib/lockref.c
index f07a40d..d2233de 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,5 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
-#include <linux/mutex.h>
 
 #if USE_CMPXCHG_LOCKREF
 
@@ -29,7 +28,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		arch_mutex_cpu_relax();						\
+		cpu_relax_lowlatency();						\
 	}									\
 } while (0)
 
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 963b703..fe5a334 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
 
 #define PCPU_COUNT_BIAS		(1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@
 {
 	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-	ref->pcpu_count = alloc_percpu(unsigned);
-	if (!ref->pcpu_count)
+	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+	if (!ref->pcpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
@@ -56,53 +61,71 @@
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
  *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init().  @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
  *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return.  To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
-	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+	BUG_ON(!pcpu_count);
+	WARN_ON(!percpu_ref_is_zero(ref));
+
+	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+	/*
+	 * Restore per-cpu operation.  smp_store_release() is paired with
+	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following PCPU_REF_DEAD clearing.
+	 */
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(pcpu_count, cpu) = 0;
+
+	smp_store_release(&ref->pcpu_count_ptr,
+			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref.  The caller is responsible for ensuring that
+ * @ref is no longer in active use.  The usual places to invoke this
+ * function from are the @ref->release() callback or the init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 
 	if (pcpu_count) {
-		for_each_possible_cpu(cpu)
-			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-		free_percpu(ref->pcpu_count);
+		free_percpu(pcpu_count);
+		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 	}
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	unsigned count = 0;
 	int cpu;
 
-	/* Mask out PCPU_REF_DEAD */
-	pcpu_count = (unsigned __percpu *)
-		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	free_percpu(pcpu_count);
-
 	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
 	/*
@@ -152,11 +175,10 @@
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
-	ref->pcpu_count = (unsigned __percpu *)
-		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
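
Taken together, the percpu_ref rework gives a symmetric lifecycle: init,
kill (asynchronous), optional reinit once the count has hit zero, and exit
to free the percpu counter. A hedged sketch of that lifecycle; the embedding
object and further_setup() step are illustrative, not part of the patch:

    struct obj {
            struct percpu_ref ref;
            /* ... */
    };

    static void obj_release(struct percpu_ref *ref)
    {
            struct obj *o = container_of(ref, struct obj, ref);

            percpu_ref_exit(&o->ref);       /* frees the percpu counter */
            kfree(o);
    }

    /* init path */
    if (percpu_ref_init(&o->ref, obj_release))
            return -ENOMEM;
    if (further_setup(o) < 0) {             /* illustrative later init step */
            percpu_ref_exit(&o->ref);       /* replaces percpu_ref_cancel_init() */
            kfree(o);
            return -EINVAL;
    }

    /* shutdown is asynchronous; obj_release() runs when the count hits 0 */
    percpu_ref_kill(&o->ref);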
diff --git a/mm/filemap.c b/mm/filemap.c
index d175917..65d44fd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1019,18 +1019,21 @@
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
- * PCG flags modify how the page is returned
+ * PCG flags modify how the page is returned.
  *
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is return locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *		@gfp_mask and added to the page cache and the VM's LRU
- *		list. The page is returned locked and with an increased
- *		refcount. Otherwise, %NULL is returned.
+ *		@cache_gfp_mask and added to the page cache and the VM's LRU
+ *		list. If radix tree nodes are allocated during page cache
+ *		insertion then @radix_gfp_mask is used. The page is returned
+ *		locked and with an increased refcount. Otherwise, %NULL is
+ *		returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9221c02..7a0a73d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@
 	return NULL;
 }
 
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
 {
 	/*
 	 * Can't pass hstate in here because it is called from the
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 493f758..9aae6f4 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -358,9 +358,8 @@
 	cft = &h->cgroup_files[4];
 	memset(cft, 0, sizeof(*cft));
 
-	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
-
-	return;
+	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
+					  h->cgroup_files));
 }
 
 void __init hugetlb_cgroup_file_init(void)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb..f009a14 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@
 {
 	struct mem_cgroup_eventfd_list *ev;
 
+	spin_lock(&memcg_oom_lock);
+
 	list_for_each_entry(ev, &memcg->oom_notify, list)
 		eventfd_signal(ev->eventfd, 1);
+
+	spin_unlock(&memcg_oom_lock);
 	return 0;
 }
 
@@ -6003,7 +6007,6 @@
 	},
 	{
 		.name = "use_hierarchy",
-		.flags = CFTYPE_INSANE,
 		.write_u64 = mem_cgroup_hierarchy_write,
 		.read_u64 = mem_cgroup_hierarchy_read,
 	},
@@ -6407,6 +6410,29 @@
 	__mem_cgroup_free(memcg);
 }
 
+/**
+ * mem_cgroup_css_reset - reset the states of a mem_cgroup
+ * @css: the target css
+ *
+ * Reset the states of the mem_cgroup associated with @css.  This is
+ * invoked when the userland requests disabling on the default hierarchy
+ * but the memcg is pinned through dependency.  The memcg should stop
+ * applying policies and should revert to the vanilla state as it may be
+ * made visible again.
+ *
+ * The current implementation only resets the essential configurations.
+ * This needs to be expanded to cover all the visible parts.
+ */
+static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+	mem_cgroup_resize_limit(memcg, ULLONG_MAX);
+	mem_cgroup_resize_memsw_limit(memcg, ULLONG_MAX);
+	memcg_update_kmem_limit(memcg, ULLONG_MAX);
+	res_counter_set_soft_limit(&memcg->res, ULLONG_MAX);
+}
+
 #ifdef CONFIG_MMU
 /* Handlers for move charge at task migration. */
 #define PRECHARGE_COUNT_AT_ONCE	256
@@ -7001,16 +7027,17 @@
 
 /*
  * Cgroup retains root cgroups across [un]mount cycles making it necessary
- * to verify sane_behavior flag on each mount attempt.
+ * to verify whether we're attached to the default hierarchy on each mount
+ * attempt.
  */
 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
 {
 	/*
-	 * use_hierarchy is forced with sane_behavior.  cgroup core
+	 * use_hierarchy is forced on the default hierarchy.  cgroup core
 	 * guarantees that @root doesn't have any children, so turning it
 	 * on for the root memcg is enough.
 	 */
-	if (cgroup_sane_behavior(root_css->cgroup))
+	if (cgroup_on_dfl(root_css->cgroup))
 		mem_cgroup_from_css(root_css)->use_hierarchy = true;
 }
 
@@ -7019,11 +7046,12 @@
 	.css_online = mem_cgroup_css_online,
 	.css_offline = mem_cgroup_css_offline,
 	.css_free = mem_cgroup_css_free,
+	.css_reset = mem_cgroup_css_reset,
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.attach = mem_cgroup_move_task,
 	.bind = mem_cgroup_bind,
-	.base_cftypes = mem_cgroup_files,
+	.legacy_cftypes = mem_cgroup_files,
 	.early_init = 0,
 };
 
@@ -7040,7 +7068,8 @@
 
 static void __init memsw_file_init(void)
 {
-	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
+	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
+					  memsw_cgroup_files));
 }
 
 static void __init enable_swap_cgroup(void)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7211a73..a013bc9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,13 @@
 	struct page *hpage = *hpagep;
 	struct page *ppage;
 
-	if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
+	/*
+	 * Here we are interested only in user-mapped pages, so skip any
+	 * other types of pages.
+	 */
+	if (PageReserved(p) || PageSlab(p))
+		return SWAP_SUCCESS;
+	if (!(PageLRU(hpage) || PageHuge(p)))
 		return SWAP_SUCCESS;
 
 	/*
@@ -905,8 +911,10 @@
 	if (!page_mapped(hpage))
 		return SWAP_SUCCESS;
 
-	if (PageKsm(p))
+	if (PageKsm(p)) {
+		pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
 		return SWAP_FAIL;
+	}
 
 	if (PageSwapCache(p)) {
 		printk(KERN_ERR
@@ -1229,7 +1237,7 @@
 	 */
 	if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
 	    != SWAP_SUCCESS) {
-		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+		action_result(pfn, "unmapping failed", IGNORED);
 		res = -EBUSY;
 		goto out;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index 7e8d820..8b44f76 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@
 	update_mmu_cache(vma, address, pte);
 }
 
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
 
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order. It's what do_fault_around() expects to see.
- */
 static inline unsigned long fault_around_pages(void)
 {
-	return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+	return fault_around_bytes >> PAGE_SHIFT;
 }
 
 static inline unsigned long fault_around_mask(void)
 {
-	return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+	return ~(fault_around_bytes - 1) & PAGE_MASK;
 }
 
-
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
@@ -2782,11 +2777,19 @@
 	return 0;
 }
 
+/*
+ * fault_around_pages() and fault_around_mask() expect fault_around_bytes
+ * rounded down to the nearest page order, which is what do_fault_around()
+ * expects to see.
+ */
 static int fault_around_bytes_set(void *data, u64 val)
 {
 	if (val / PAGE_SIZE > PTRS_PER_PTE)
 		return -EINVAL;
-	fault_around_bytes = val;
+	if (val > PAGE_SIZE)
+		fault_around_bytes = rounddown_pow_of_two(val);
+	else
+		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
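
Since fault_around_bytes is now guaranteed to be a power of two, both helpers reduce to cheap shifts and masks. A worked example with PAGE_SIZE = 4096 and the default value:

	fault_around_bytes   = 65536
	fault_around_pages() = 65536 >> 12              = 16 pages
	fault_around_mask()  = ~(65536 - 1) & PAGE_MASK = ...ffff0000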
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3..e0c94301 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@
 	*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 	if (bdi_bg_thresh)
-		*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
-					 background_thresh,
-					 dirty_thresh);
+		*bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+							background_thresh,
+							dirty_thresh) : 0;
 
 	/*
 	 * In order to avoid the stacked BDI deadlock we need
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ea758b..ef44ad7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-	const gfp_t wait = gfp_mask & __GFP_WAIT;
+	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@
 	 * The caller may dip into page reserves a bit more if the caller
 	 * cannot run direct reclaim, or if the caller has realtime scheduling
 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
 	 */
 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-	if (!wait) {
+	if (atomic) {
 		/*
-		 * Not worth trying to allocate harder for
-		 * __GFP_NOMEMALLOC even if it can't schedule.
+		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+		 * if it can't schedule.
 		 */
-		if  (!(gfp_mask & __GFP_NOMEMALLOC))
+		if (!(gfp_mask & __GFP_NOMEMALLOC))
 			alloc_flags |= ALLOC_HARDER;
 		/*
-		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+		 * comment for __cpuset_node_allowed_softwall().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -6062,11 +6062,13 @@
 }
 
 /**
- * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest to retrieve
- * @end_bitidx: The last bit of interest
- * returns pageblock_bits flags
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest to retrieve
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
  */
 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 					unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@
 /**
  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
  * @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
  */
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 					unsigned long pfn,
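
Returning to the gfp_to_alloc_flags() hunk above, a worked example of the new predicate for GFP_ATOMIC — which at this point is __GFP_HIGH with neither __GFP_WAIT nor __GFP_NO_KSWAPD set (this is a restatement of the logic, not new code):

	atomic      = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));  /* true */
	alloc_flags = ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER;   /* ALLOC_CPUSET cleared */

Note that allocations carrying __GFP_NO_KSWAPD now fail the predicate even without __GFP_WAIT, so they no longer receive the atomic-style reserve and cpuset exemptions.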
diff --git a/mm/percpu.c b/mm/percpu.c
index 2ddf9a9..2139e30 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -720,8 +720,7 @@
 	if (unlikely(align < 2))
 		align = 2;
 
-	if (unlikely(size & 1))
-		size++;
+	size = ALIGN(size, 2);
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
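
ALIGN(size, 2) is behaviorally identical to the removed odd-size bump, just more explicit about intent:

	ALIGN(7, 2) == 8	/* old code: 7 is odd, so size++ -> 8 */
	ALIGN(8, 2) == 8	/* even sizes pass through unchanged */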
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf..bc8aeef 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@
 {
 	int tot_len;
 
-	if (kern_msg->msg_namelen) {
+	if (kern_msg->msg_name && kern_msg->msg_namelen) {
 		if (mode == VERIFY_READ) {
 			int err = move_addr_to_kernel(kern_msg->msg_name,
 						      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@
 			if (err < 0)
 				return err;
 		}
-		if (kern_msg->msg_name)
-			kern_msg->msg_name = kern_address;
-	} else
+		kern_msg->msg_name = kern_address;
+	} else {
 		kern_msg->msg_name = NULL;
+		kern_msg->msg_namelen = 0;
+	}
 
 	tot_len = iov_from_user_compat_to_kern(kern_iov,
 					  (struct compat_iovec __user *)kern_msg->msg_iov,
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 827dd6b..e1ec45a 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -39,7 +39,7 @@
 {
 	int size, ct, err;
 
-	if (m->msg_namelen) {
+	if (m->msg_name && m->msg_namelen) {
 		if (mode == VERIFY_READ) {
 			void __user *namep;
 			namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@
 			if (err < 0)
 				return err;
 		}
-		if (m->msg_name)
-			m->msg_name = address;
+		m->msg_name = address;
 	} else {
 		m->msg_name = NULL;
+		m->msg_namelen = 0;
 	}
 
 	size = m->msg_iovlen * sizeof(struct iovec);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 559890b..ef31fef 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2249,7 +2249,7 @@
 	ndm->ndm_pad1    = 0;
 	ndm->ndm_pad2    = 0;
 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
-	ndm->ndm_type	 = NDA_DST;
+	ndm->ndm_type	 = RTN_UNICAST;
 	ndm->ndm_ifindex = pn->dev->ifindex;
 	ndm->ndm_state	 = NUD_NONE;
 
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 30d903b..1f2a126 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -107,5 +107,5 @@
 	.css_online		= cgrp_css_online,
 	.css_free		= cgrp_css_free,
 	.attach			= cgrp_attach,
-	.base_cftypes		= ss_files,
+	.legacy_cftypes		= ss_files,
 };
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 2f385b9..cbd0a19 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -249,7 +249,7 @@
 	.css_online	= cgrp_css_online,
 	.css_free	= cgrp_css_free,
 	.attach		= net_prio_attach,
-	.base_cftypes	= ss_files,
+	.legacy_cftypes	= ss_files,
 };
 
 static int netprio_device_event(struct notifier_block *unused,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3162ea9..1901998 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,8 +457,31 @@
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+	atomic_t	id;
+	u32		stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes it hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+	u32 old = ACCESS_ONCE(bucket->stamp32);
+	u32 now = (u32)jiffies;
+	u32 delta = 0;
+
+	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+		delta = prandom_u32_max(now - old);
+
+	return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);
 
 void __ip_select_ident(struct iphdr *iph, int segs)
 {
@@ -467,7 +490,10 @@
 
 	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-	hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+	hash = jhash_3words((__force u32)iph->daddr,
+			    (__force u32)iph->saddr,
+			    iph->protocol,
+			    ip_idents_hashrnd);
 	id = ip_idents_reserve(hash, segs);
 	iph->id = htons(id);
 }
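
The reservation logic above is compact; a simplified userspace analogue may help. This is illustrative only — prandom_u32_max() is approximated with rand(), and the kernel's jiffies clock with a caller-supplied 'now':

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct ident_bucket {
		atomic_uint id;
		atomic_uint stamp32;	/* low 32 bits of a coarse clock */
	};

	static uint32_t reserve_ids(struct ident_bucket *b, uint32_t now, int segs)
	{
		uint32_t old = atomic_load(&b->stamp32);
		uint32_t delta = 0;

		/* Bucket was idle: perturb by a random amount bounded by the idle time. */
		if (old != now &&
		    atomic_compare_exchange_strong(&b->stamp32, &old, now))
			delta = rand() % (now - old);

		/* Reserve 'segs' ids; return the first one, past the perturbation. */
		return atomic_fetch_add(&b->id, segs + delta) + delta;
	}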
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index f7a2ec3..3af5226 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -222,7 +222,7 @@
 
 static int __init tcp_memcontrol_init(void)
 {
-	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, tcp_files));
+	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, tcp_files));
 	return 0;
 }
 __initcall(tcp_memcontrol_init);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cb9df0e..45702b8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -545,6 +545,8 @@
 	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
 	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
 	id = ip_idents_reserve(hash, 1);
 	fhdr->identification = htonl(id);
 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d7513a5..592f4b1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -472,12 +472,15 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
-	struct rate_control_ref *ref = local->rate_ctrl;
+	struct rate_control_ref *ref = NULL;
 	struct timespec uptime;
 	u64 packets = 0;
 	u32 thr = 0;
 	int i, ac;
 
+	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+		ref = local->rate_ctrl;
+
 	sinfo->generation = sdata->local->sta_generation;
 
 	sinfo->filled = STATION_INFO_INACTIVE_TIME |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5214686..1a252c6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -414,6 +414,9 @@
 	if (ieee80211_has_order(hdr->frame_control))
 		return TX_CONTINUE;
 
+	if (ieee80211_is_probe_req(hdr->frame_control))
+		return TX_CONTINUE;
+
 	if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
 		info->hw_queue = tx->sdata->vif.cab_queue;
 
@@ -463,6 +466,7 @@
 {
 	struct sta_info *sta = tx->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	struct ieee80211_local *local = tx->local;
 
 	if (unlikely(!sta))
@@ -473,6 +477,12 @@
 		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
 		int ac = skb_get_queue_mapping(tx->skb);
 
+		if (ieee80211_is_mgmt(hdr->frame_control) &&
+		    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+			return TX_CONTINUE;
+		}
+
 		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
 		       sta->sta.addr, sta->sta.aid, ac);
 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -531,19 +541,9 @@
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-
 	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 		return TX_CONTINUE;
 
-	if (ieee80211_is_mgmt(hdr->frame_control) &&
-	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
-		if (tx->flags & IEEE80211_TX_UNICAST)
-			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-		return TX_CONTINUE;
-	}
-
 	if (tx->flags & IEEE80211_TX_UNICAST)
 		return ieee80211_tx_h_unicast_ps_buf(tx);
 	else
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a8eb0a8..610e19c 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -797,7 +797,6 @@
 			ip_vs_control_del(cp);
 
 		if (cp->flags & IP_VS_CONN_F_NFCT) {
-			ip_vs_conn_drop_conntrack(cp);
 			/* Do not access conntracks during subsys cleanup
 			 * because nf_conntrack_find_get can not be used after
 			 * conntrack cleanup for the net.
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9de23a2..06a9ee6 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1097,6 +1097,7 @@
 	asoc->c = new->c;
 	asoc->peer.rwnd = new->peer.rwnd;
 	asoc->peer.sack_needed = new->peer.sack_needed;
+	asoc->peer.auth_capable = new->peer.auth_capable;
 	asoc->peer.i = new->peer.i;
 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 560ed77..7cc887f 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2094,7 +2094,8 @@
 		MAC_ASSIGN(addr, addr);
 		__entry->key_type = key_type;
 		__entry->key_id = key_id;
-		memcpy(__entry->tsc, tsc, 6);
+		if (tsc)
+			memcpy(__entry->tsc, tsc, 6);
 	),
 	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
 		  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a8ef510..0525d78 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2097,6 +2097,8 @@
 				goto no_transform;
 			}
 
+			dst_hold(&xdst->u.dst);
+			xdst->u.dst.flags |= DST_NOCACHE;
 			route = xdst->route;
 		}
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 412d9dc..d4db6eb 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -177,9 +177,7 @@
 		    attrs[XFRMA_ALG_AEAD]	||
 		    attrs[XFRMA_ALG_CRYPT]	||
 		    attrs[XFRMA_ALG_COMP]	||
-		    attrs[XFRMA_TFCPAD]		||
-		    (ntohl(p->id.spi) >= 0x10000))
-
+		    attrs[XFRMA_TFCPAD])
 			goto out;
 		break;
 
@@ -207,7 +205,8 @@
 		    attrs[XFRMA_ALG_AUTH]	||
 		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
 		    attrs[XFRMA_ALG_CRYPT]	||
-		    attrs[XFRMA_TFCPAD])
+		    attrs[XFRMA_TFCPAD]		||
+		    (ntohl(p->id.spi) >= 0x10000))
 			goto out;
 		break;
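
The relocated bound matches the IPComp wire format: a Compression Parameter Index is only 16 bits, so values that don't fit are rejected. An illustrative restatement of the check being moved, assuming this branch handles IPPROTO_COMP:

	case IPPROTO_COMP:	/* CPI is 16 bits on the wire */
		if (ntohl(p->id.spi) >= 0x10000)
			goto out;
		break;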
 
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 4b0113f..4764292 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -87,7 +87,7 @@
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->foo, foo, 10);
+		strlcpy(__entry->foo, foo, 10);
 		__entry->bar	= bar;
 	),
 
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 4198788..d701627 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -21,6 +21,7 @@
 my $email = 1;
 my $email_usename = 1;
 my $email_maintainer = 1;
+my $email_reviewer = 1;
 my $email_list = 1;
 my $email_subscriber_list = 0;
 my $email_git_penguin_chiefs = 0;
@@ -202,6 +203,7 @@
 		'remove-duplicates!' => \$email_remove_duplicates,
 		'mailmap!' => \$email_use_mailmap,
 		'm!' => \$email_maintainer,
+		'r!' => \$email_reviewer,
 		'n!' => \$email_usename,
 		'l!' => \$email_list,
 		's!' => \$email_subscriber_list,
@@ -260,7 +262,8 @@
 }
 
 if ($email &&
-    ($email_maintainer + $email_list + $email_subscriber_list +
+    ($email_maintainer + $email_reviewer +
+     $email_list + $email_subscriber_list +
      $email_git + $email_git_penguin_chiefs + $email_git_blame) == 0) {
     die "$P: Please select at least 1 email option\n";
 }
@@ -750,6 +753,7 @@
     --hg-since => hg history to use (default: $email_hg_since)
     --interactive => display a menu (mostly useful if used with the --git option)
     --m => include maintainer(s) if any
+    --r => include reviewer(s) if any
     --n => include name 'Full Name <addr\@domain.tld>'
     --l => include list(s) if any
     --s => include subscriber only list(s) if any
@@ -1064,6 +1068,22 @@
 		    my $role = get_maintainer_role($i);
 		    push_email_addresses($pvalue, $role);
 		}
+	    } elsif ($ptype eq "R") {
+		my ($name, $address) = parse_email($pvalue);
+		if ($name eq "") {
+		    if ($i > 0) {
+			my $tv = $typevalue[$i - 1];
+			if ($tv =~ m/^(\C):\s*(.*)/) {
+			    if ($1 eq "P") {
+				$name = $2;
+				$pvalue = format_email($name, $address, $email_usename);
+			    }
+			}
+		    }
+		}
+		if ($email_reviewer) {
+		    push_email_addresses($pvalue, 'reviewer');
+		}
 	    } elsif ($ptype eq "T") {
 		push(@scm, $pvalue);
 	    } elsif ($ptype eq "W") {
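
A quick way to exercise the new reviewer handling (the file path is illustrative):

  $ ./scripts/get_maintainer.pl -f mm/memcontrol.c          # reviewers included by default
  $ ./scripts/get_maintainer.pl --no-r -f mm/memcontrol.c   # suppress reviewer entries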
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index d9d69e6..188c1d2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -796,7 +796,7 @@
 	.css_free = devcgroup_css_free,
 	.css_online = devcgroup_online,
 	.css_offline = devcgroup_offline,
-	.base_cftypes = dev_cgroup_files,
+	.legacy_cftypes = dev_cgroup_files,
 };
 
 /**
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 93825a1..cf3a44b 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -2395,7 +2395,7 @@
 {
 	struct print_arg *field;
 	enum event_type type;
-	char *token;
+	char *token = NULL;
 
 	memset(arg, 0, sizeof(*arg));
 	arg->type = PRINT_FLAGS;
@@ -2448,7 +2448,7 @@
 {
 	struct print_arg *field;
 	enum event_type type;
-	char *token;
+	char *token = NULL;
 
 	memset(arg, 0, sizeof(*arg));
 	arg->type = PRINT_SYMBOL;
@@ -2487,7 +2487,7 @@
 {
 	struct print_arg *field;
 	enum event_type type;
-	char *token;
+	char *token = NULL;
 
 	memset(arg, 0, sizeof(*arg));
 	arg->type = PRINT_HEX;
diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugin_cfg80211.c
index c066b25..4592d84 100644
--- a/tools/lib/traceevent/plugin_cfg80211.c
+++ b/tools/lib/traceevent/plugin_cfg80211.c
@@ -5,8 +5,7 @@
 #include "event-parse.h"
 
 static unsigned long long
-process___le16_to_cpup(struct trace_seq *s,
-		       unsigned long long *args)
+process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
 {
 	uint16_t *val = (uint16_t *) (unsigned long) args[0];
 	return val ? (long long) le16toh(*val) : 0;
diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c
index 0db714c..5c23d5b 100644
--- a/tools/lib/traceevent/plugin_jbd2.c
+++ b/tools/lib/traceevent/plugin_jbd2.c
@@ -30,8 +30,7 @@
 #define MINOR(dev)	((unsigned int) ((dev) & MINORMASK))
 
 static unsigned long long
-process_jbd2_dev_to_name(struct trace_seq *s,
-			 unsigned long long *args)
+process_jbd2_dev_to_name(struct trace_seq *s, unsigned long long *args)
 {
 	unsigned int dev = args[0];
 
@@ -40,8 +39,7 @@
 }
 
 static unsigned long long
-process_jiffies_to_msecs(struct trace_seq *s,
-			 unsigned long long *args)
+process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
 {
 	unsigned long long jiffies = args[0];
 
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 9e0e8c6..88fe83d 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -240,25 +240,38 @@
 	for (i = 0; strings[i].val >= 0; i++)
 		if (strings[i].val == val)
 			break;
-	if (strings[i].str)
-		return strings[i].str;
-	return "UNKNOWN";
+
+	return strings[i].str;
 }
 
-static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
-			    struct event_format *event, void *context)
+static int print_exit_reason(struct trace_seq *s, struct pevent_record *record,
+			     struct event_format *event, const char *field)
 {
 	unsigned long long isa;
 	unsigned long long val;
-	unsigned long long info1 = 0, info2 = 0;
+	const char *reason;
 
-	if (pevent_get_field_val(s, event, "exit_reason", record, &val, 1) < 0)
+	if (pevent_get_field_val(s, event, field, record, &val, 1) < 0)
 		return -1;
 
 	if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0)
 		isa = 1;
 
-	trace_seq_printf(s, "reason %s", find_exit_reason(isa, val));
+	reason = find_exit_reason(isa, val);
+	if (reason)
+		trace_seq_printf(s, "reason %s", reason);
+	else
+		trace_seq_printf(s, "reason UNKNOWN (%llu)", val);
+	return 0;
+}
+
+static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
+			    struct event_format *event, void *context)
+{
+	unsigned long long info1 = 0, info2 = 0;
+
+	if (print_exit_reason(s, record, event, "exit_reason") < 0)
+		return -1;
 
 	pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);
 
@@ -313,6 +326,29 @@
 	return 0;
 }
 
+static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct pevent_record *record,
+					    struct event_format *event, void *context)
+{
+	if (print_exit_reason(s, record, event, "exit_code") < 0)
+		return -1;
+
+	pevent_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
+	pevent_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
+	pevent_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
+	pevent_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);
+
+	return 0;
+}
+
+static int kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *record,
+				     struct event_format *event, void *context)
+{
+	pevent_print_num_field(s, "rip %llx ", event, "rip", record, 1);
+
+	return kvm_nested_vmexit_inject_handler(s, record, event, context);
+}
+
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
@@ -409,6 +445,12 @@
 	pevent_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
 				      kvm_emulate_insn_handler, NULL);
 
+	pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+				      kvm_nested_vmexit_handler, NULL);
+
+	pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+				      kvm_nested_vmexit_inject_handler, NULL);
+
 	pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
 				      kvm_mmu_get_page_handler, NULL);
 
@@ -443,6 +485,12 @@
 	pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
 					kvm_emulate_insn_handler, NULL);
 
+	pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+					kvm_nested_vmexit_handler, NULL);
+
+	pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+					kvm_nested_vmexit_inject_handler, NULL);
+
 	pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
 					kvm_mmu_get_page_handler, NULL);
 
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 4464ad7..f6480cb 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -16,6 +16,10 @@
 
 COMMON OPTIONS
 --------------
+-r::
+--repeat=::
+Specify the number of times to repeat the run (default 10).
+
 -f::
 --format=::
 Specify format style.
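
For example, with the option now shared by every benchmark (the invocation below is illustrative):

  $ perf bench -r 100 futex wake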
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index a00a342..dc7442c 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -41,6 +41,9 @@
 	tasks slept. sched_switch contains a callchain where a task slept and
 	sched_stat contains a timeslice how long a task slept.
 
+--kallsyms=<file>::
+	kallsyms pathname
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index 52276a6..6e689dc 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -51,9 +51,9 @@
   'perf kvm stat <command>' to run a command and gather performance counter
   statistics.
   Especially, perf 'kvm stat record/report' generates a statistical analysis
-  of KVM events. Currently, vmexit, mmio and ioport events are supported.
-  'perf kvm stat record <command>' records kvm events and the events between
-  start and end <command>.
+  of KVM events. Currently, vmexit, mmio (x86 only) and ioport (x86 only)
+  events are supported. 'perf kvm stat record <command>' records kvm events
+  and the events between start and end <command>.
   And this command produces a file which contains tracing results of kvm
   events.
 
@@ -103,8 +103,8 @@
        analyze events which occur on this vcpu. (default: all vcpus)
 
 --event=<value>::
-       event to be analyzed. Possible values: vmexit, mmio, ioport.
-       (default: vmexit)
+       event to be analyzed. Possible values: vmexit, mmio (x86 only),
+       ioport (x86 only). (default: vmexit)
 -k::
 --key=<value>::
        Sorting key. Possible values: sample (default, sort by samples
@@ -138,7 +138,8 @@
 
 
 --event=<value>::
-       event to be analyzed. Possible values: vmexit, mmio, ioport.
+       event to be analyzed. Possible values: vmexit,
+       mmio (x86 only), ioport (x86 only).
        (default: vmexit)
 
 -k::
@@ -147,7 +148,8 @@
        number), time (sort by average time).
 
 --duration=<value>::
-       Show events other than HLT that take longer than duration usecs.
+       Show events other than HLT (x86 only) or Wait state (s390 only)
+       that take longer than duration usecs.
 
 SEE ALSO
 --------
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 5e0f986..df98d1c 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -15,10 +15,20 @@
 There are two variants of perf timechart:
 
   'perf timechart record <command>' to record the system level events
-  of an arbitrary workload.
+  of an arbitrary workload. By default timechart records only scheduler
+  and CPU events (task switches, running times, CPU power states, etc),
+  but it's possible to record IO (disk, network) activity using the -I argument.
 
   'perf timechart' to turn a trace into a Scalable Vector Graphics file,
-  that can be viewed with popular SVG viewers such as 'Inkscape'.
+  that can be viewed with popular SVG viewers such as 'Inkscape'. Depending
+  on the events in the perf.data file, the timechart will contain scheduler/cpu
+  events or IO events.
+
+  In IO mode, every bar has two charts: upper and lower.
+  The upper bar shows incoming events (disk reads, ingress network packets).
+  The lower bar shows outgoing events (disk writes, egress network packets).
+  There are also poll bars which show how much time the application spent
+  in poll/epoll/select syscalls.
 
 TIMECHART OPTIONS
 -----------------
@@ -54,6 +64,19 @@
 	duration or tasks with given name. If number is given it's interpreted
 	as number of nanoseconds. If non-numeric string is given it's
 	interpreted as task name.
+--io-skip-eagain::
+	Don't draw EAGAIN IO events.
+--io-min-time=<nsecs>::
+	Draw small events as if they lasted min-time. Useful when you need
+	to see very small and fast IO. It's possible to use an ms or us
+	suffix to specify the time in milliseconds or microseconds.
+	The default value is 1ms.
+--io-merge-dist=<nsecs>::
+	Merge events that are merge-dist nanoseconds apart.
+	This reduces the number of figures on the SVG and makes it more
+	render-friendly. It's possible to use an ms or us suffix to
+	specify the time in milliseconds or microseconds.
+	The default value is 1us.
 
 RECORD OPTIONS
 --------------
@@ -63,6 +86,9 @@
 -T::
 --tasks-only::
         Record only tasks-related events
+-I::
+--io-only::
+        Record only io-related events
 -g::
 --callchain::
         Do call-graph (stack chain/backtrace) recording
@@ -87,6 +113,14 @@
 
   $ perf timechart --highlight gcc
 
+Record system-wide IO events:
+
+  $ perf timechart record -I
+
+  then generate timechart:
+
+  $ perf timechart
+
 SEE ALSO
 --------
 linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index fae38d9..02aac83 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -107,6 +107,52 @@
 	Show tool stats such as number of times fd->pathname was discovered thru
 	hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
 
+-F=[all|min|maj]::
+--pf=[all|min|maj]::
+	Trace pagefaults. Optionally, you can specify whether you want minor,
+	major or all pagefaults. The default value is maj.
+
+--syscalls::
+	Trace system calls. This option is enabled by default.
+
+PAGEFAULTS
+----------
+
+When tracing pagefaults, the format of the trace is as follows:
+
+<min|maj>fault [<ip.symbol>+<ip.offset>] => <addr.dso@addr.offset> (<map type><addr level>).
+
+- min/maj indicates whether the fault event is minor or major;
+- ip.symbol shows the symbol for the instruction pointer (the code that
+  generated the fault); if no debug symbols are available, perf trace will
+  print the raw IP;
+- addr.dso shows the DSO for the faulted address;
+- map type is either 'd' for non-executable maps or 'x' for executable maps;
+- addr level is either 'k' for kernel dso or '.' for user dso.
+
+For symbol resolution you may need to install debugging symbols.
+
+Please be aware that the duration is currently always 0 and doesn't reflect
+the actual time it took for the fault to be handled!
+
+When --verbose is specified, perf trace tries to print all available
+information for both the IP and the fault address in the form of
+dso@symbol+offset.
+
+EXAMPLES
+--------
+
+Trace only major pagefaults:
+
+ $ perf trace --no-syscalls -F
+
+Trace syscalls, major and minor pagefaults:
+
+ $ perf trace -F all
+
+  1416.547 ( 0.000 ms): python/20235 majfault [CRYPTO_push_info_+0x0] => /lib/x86_64-linux-gnu/libcrypto.so.1.0.0@0x61be0 (x.)
+
+  As you can see, there was a major pagefault in the python process, from
+  the CRYPTO_push_info_ routine, which faulted somewhere in libcrypto.so.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script[1]
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 0eeb247..d240bb2 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -8,7 +8,15 @@
 SYNOPSIS
 --------
 [verse]
-'perf' [--version] [--help] COMMAND [ARGS]
+'perf' [--version] [--help] [OPTIONS] COMMAND [ARGS]
+
+OPTIONS
+-------
+--debug::
+	Set up the debug variable (just verbose for now) in the value
+	range (0, 10). Use it like:
+	  --debug verbose   # sets verbose = 1
+	  --debug verbose=2 # sets verbose = 2
 
 DESCRIPTION
 -----------
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 45da209..344c4d3 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -37,3 +37,6 @@
 arch/x86/include/uapi/asm/svm.h
 arch/x86/include/uapi/asm/vmx.h
 arch/x86/include/uapi/asm/kvm.h
+arch/x86/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/sie.h
+arch/s390/include/uapi/asm/kvm_perf.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 9670a16..2240974 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -295,11 +295,13 @@
 LIB_H += util/perf_regs.h
 LIB_H += util/unwind.h
 LIB_H += util/vdso.h
+LIB_H += util/tsc.h
 LIB_H += ui/helpline.h
 LIB_H += ui/progress.h
 LIB_H += ui/util.h
 LIB_H += ui/ui.h
 LIB_H += util/data.h
+LIB_H += util/kvm-stat.h
 
 LIB_OBJS += $(OUTPUT)util/abspath.o
 LIB_OBJS += $(OUTPUT)util/alias.o
@@ -373,6 +375,8 @@
 LIB_OBJS += $(OUTPUT)util/record.o
 LIB_OBJS += $(OUTPUT)util/srcline.o
 LIB_OBJS += $(OUTPUT)util/data.o
+LIB_OBJS += $(OUTPUT)util/tsc.o
+LIB_OBJS += $(OUTPUT)util/cloexec.o
 
 LIB_OBJS += $(OUTPUT)ui/setup.o
 LIB_OBJS += $(OUTPUT)ui/helpline.o
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index 744e629..b92219b 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -3,3 +3,4 @@
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 2f7073d..6c1b8a7 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -5,9 +5,7 @@
 #include <string.h>
 
 #include "../../util/header.h"
-
-#define __stringify_1(x)        #x
-#define __stringify(x)          __stringify_1(x)
+#include "../../util/util.h"
 
 #define mfspr(rn)       ({unsigned long rval; \
 			 asm volatile("mfspr %0," __stringify(rn) \
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
new file mode 100644
index 0000000..a7c23a4
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -0,0 +1,266 @@
+/*
+ * Use DWARF Debug information to skip unnecessary callchain entries.
+ *
+ * Copyright (C) 2014 Sukadev Bhattiprolu, IBM Corporation.
+ * Copyright (C) 2014 Ulrich Weigand, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <inttypes.h>
+#include <dwarf.h>
+#include <elfutils/libdwfl.h>
+
+#include "util/thread.h"
+#include "util/callchain.h"
+
+/*
+ * When saving the callchain on Power, the kernel conservatively saves
+ * excess entries in the callchain. A few of these entries are needed
+ * in some cases but not others. If the unnecessary entries are not
+ * ignored, we end up with duplicate arcs in the call-graphs. Use
+ * DWARF debug information to skip over any unnecessary callchain
+ * entries.
+ *
+ * See function header for arch_adjust_callchain() below for more details.
+ *
+ * The libdwfl code in this file is based on code from elfutils
+ * (libdwfl/argp-std.c, libdwfl/tests/addrcfi.c, etc).
+ */
+static char *debuginfo_path;
+
+static const Dwfl_Callbacks offline_callbacks = {
+	.debuginfo_path = &debuginfo_path,
+	.find_debuginfo = dwfl_standard_find_debuginfo,
+	.section_address = dwfl_offline_section_address,
+};
+
+
+/*
+ * Use the DWARF expression for the Call-frame-address and determine
+ * if the return address is in the LR and if a new frame was allocated.
+ */
+static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
+{
+	Dwarf_Op ops_mem[2];
+	Dwarf_Op dummy;
+	Dwarf_Op *ops = &dummy;
+	size_t nops;
+	int result;
+
+	result = dwarf_frame_register(frame, ra_regno, ops_mem, &ops, &nops);
+	if (result < 0) {
+		pr_debug("dwarf_frame_register() %s\n", dwarf_errmsg(-1));
+		return -1;
+	}
+
+	/*
+	 * Check if return address is on the stack.
+	 */
+	if (nops != 0 || ops != NULL)
+		return 0;
+
+	/*
+	 * Return address is in LR. Check if a frame was allocated
+	 * but not yet used.
+	 */
+	result = dwarf_frame_cfa(frame, &ops, &nops);
+	if (result < 0) {
+		pr_debug("dwarf_frame_cfa() returns %d, %s\n", result,
+					dwarf_errmsg(-1));
+		return -1;
+	}
+
+	/*
+	 * If call frame address is in r1, no new frame was allocated.
+	 */
+	if (nops == 1 && ops[0].atom == DW_OP_bregx && ops[0].number == 1 &&
+				ops[0].number2 == 0)
+		return 1;
+
+	/*
+	 * A new frame was allocated but has not yet been used.
+	 */
+	return 2;
+}
+
+/*
+ * Get the DWARF frame from the .eh_frame section.
+ */
+static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
+{
+	int		result;
+	Dwarf_Addr	bias;
+	Dwarf_CFI	*cfi;
+	Dwarf_Frame	*frame;
+
+	cfi = dwfl_module_eh_cfi(mod, &bias);
+	if (!cfi) {
+		pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1));
+		return NULL;
+	}
+
+	result = dwarf_cfi_addrframe(cfi, pc, &frame);
+	if (result) {
+		pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
+		return NULL;
+	}
+
+	return frame;
+}
+
+/*
+ * Get the DWARF frame from the .debug_frame section.
+ */
+static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
+{
+	Dwarf_CFI       *cfi;
+	Dwarf_Addr      bias;
+	Dwarf_Frame     *frame;
+	int             result;
+
+	cfi = dwfl_module_dwarf_cfi(mod, &bias);
+	if (!cfi) {
+		pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1));
+		return NULL;
+	}
+
+	result = dwarf_cfi_addrframe(cfi, pc, &frame);
+	if (result) {
+		pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
+		return NULL;
+	}
+
+	return frame;
+}
+
+/*
+ * Return:
+ *	0 if return address for the program counter @pc is on stack
+ *	1 if return address is in LR and no new stack frame was allocated
+ *	2 if return address is in LR and a new frame was allocated (but not
+ *		yet used)
+ *	-1 in case of errors
+ */
+static int check_return_addr(const char *exec_file, Dwarf_Addr pc)
+{
+	int		rc = -1;
+	Dwfl		*dwfl;
+	Dwfl_Module	*mod;
+	Dwarf_Frame	*frame;
+	int		ra_regno;
+	Dwarf_Addr	start = pc;
+	Dwarf_Addr	end = pc;
+	bool		signalp;
+
+	dwfl = dwfl_begin(&offline_callbacks);
+	if (!dwfl) {
+		pr_debug("dwfl_begin() failed: %s\n", dwarf_errmsg(-1));
+		return -1;
+	}
+
+	if (dwfl_report_offline(dwfl, "",  exec_file, -1) == NULL) {
+		pr_debug("dwfl_report_offline() failed %s\n", dwarf_errmsg(-1));
+		goto out;
+	}
+
+	mod = dwfl_addrmodule(dwfl, pc);
+	if (!mod) {
+		pr_debug("dwfl_addrmodule() failed, %s\n", dwarf_errmsg(-1));
+		goto out;
+	}
+
+	/*
+	 * To work with split debug info files (eg: glibc), check both
+	 * .eh_frame and .debug_frame sections of the ELF header.
+	 */
+	frame = get_eh_frame(mod, pc);
+	if (!frame) {
+		frame = get_dwarf_frame(mod, pc);
+		if (!frame)
+			goto out;
+	}
+
+	ra_regno = dwarf_frame_info(frame, &start, &end, &signalp);
+	if (ra_regno < 0) {
+		pr_debug("Return address register unavailable: %s\n",
+				dwarf_errmsg(-1));
+		goto out;
+	}
+
+	rc = check_return_reg(ra_regno, frame);
+
+out:
+	dwfl_end(dwfl);
+	return rc;
+}
+
+/*
+ * The callchain saved by the kernel always includes the link register (LR).
+ *
+ *	0:	PERF_CONTEXT_USER
+ *	1:	Program counter (Next instruction pointer)
+ *	2:	LR value
+ *	3:	Caller's caller
+ *	4:	...
+ *
+ * The value in LR is only needed when it holds a return address. If the
+ * return address is on the stack, we should ignore the LR value.
+ *
+ * Further, when the return address is in the LR, if a new frame was just
+ * allocated but the LR was not saved into it, then the LR contains the
+ * caller, slot 4 contains the caller's caller, and the contents of slot 3
+ * (chain->ips[3]) are undefined and must be ignored.
+ *
+ * Use DWARF debug information to determine if any entries need to be skipped.
+ *
+ * Return:
+ *	index of the callchain entry that needs to be ignored (if any)
+ *	-1	if no entry needs to be ignored or in case of errors
+ */
+int arch_skip_callchain_idx(struct machine *machine, struct thread *thread,
+				struct ip_callchain *chain)
+{
+	struct addr_location al;
+	struct dso *dso = NULL;
+	int rc;
+	u64 ip;
+	u64 skip_slot = -1;
+
+	if (chain->nr < 3)
+		return skip_slot;
+
+	ip = chain->ips[2];
+
+	thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER,
+			MAP__FUNCTION, ip, &al);
+
+	if (al.map)
+		dso = al.map->dso;
+
+	if (!dso) {
+		pr_debug("%" PRIx64 " dso is NULL\n", ip);
+		return skip_slot;
+	}
+
+	rc = check_return_addr(dso->long_name, ip);
+
+	pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
+				dso->long_name, chain->nr, ip, rc);
+
+	if (rc == 0) {
+		/*
+		 * Return address on stack. Ignore LR value in callchain
+		 */
+		skip_slot = 2;
+	} else if (rc == 2) {
+		/*
+		 * New frame allocated but return address still in LR.
+		 * Ignore the caller's caller entry in callchain.
+		 */
+		skip_slot = 3;
+	}
+	return skip_slot;
+}
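
A worked example may help (all addresses are hypothetical). With the return address still live in the LR and a freshly allocated but unused frame, the saved chain looks like:

	ips[0]  PERF_CONTEXT_USER
	ips[1]  0x10000a00	PC (leaf function)
	ips[2]  0x10000800	caller (live LR value)
	ips[3]  ??????????	undefined -> skip_slot = 3
	ips[4]  0x10000400	caller's caller

If the return address had already been saved on the stack instead, ips[2] (the stale LR value) would be the redundant entry and skip_slot would be 2.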
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 15130b50..798ac73 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -2,3 +2,6 @@
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
+HAVE_KVM_STAT_SUPPORT := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/kvm-stat.o
diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c
new file mode 100644
index 0000000..9fa6c3e
--- /dev/null
+++ b/tools/perf/arch/s390/util/header.c
@@ -0,0 +1,28 @@
+/*
+ * Implementation of get_cpuid().
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "../../util/header.h"
+
+int get_cpuid(char *buffer, size_t sz)
+{
+	const char *cpuid = "IBM/S390";
+
+	if (strlen(cpuid) + 1 > sz)
+		return -1;
+
+	strcpy(buffer, cpuid);
+	return 0;
+}
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
new file mode 100644
index 0000000..a5dbc07
--- /dev/null
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -0,0 +1,105 @@
+/*
+ * Arch specific functions for perf kvm stat.
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#include "../../util/kvm-stat.h"
+#include <asm/kvm_perf.h>
+
+define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
+define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
+define_exit_reasons_table(sie_sigp_order_codes, sigp_order_codes);
+define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
+define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
+
+static void event_icpt_insn_get_key(struct perf_evsel *evsel,
+				    struct perf_sample *sample,
+				    struct event_key *key)
+{
+	unsigned long insn;
+
+	insn = perf_evsel__intval(evsel, sample, "instruction");
+	key->key = icpt_insn_decoder(insn);
+	key->exit_reasons = sie_icpt_insn_codes;
+}
+
+static void event_sigp_get_key(struct perf_evsel *evsel,
+			       struct perf_sample *sample,
+			       struct event_key *key)
+{
+	key->key = perf_evsel__intval(evsel, sample, "order_code");
+	key->exit_reasons = sie_sigp_order_codes;
+}
+
+static void event_diag_get_key(struct perf_evsel *evsel,
+			       struct perf_sample *sample,
+			       struct event_key *key)
+{
+	key->key = perf_evsel__intval(evsel, sample, "code");
+	key->exit_reasons = sie_diagnose_codes;
+}
+
+static void event_icpt_prog_get_key(struct perf_evsel *evsel,
+				    struct perf_sample *sample,
+				    struct event_key *key)
+{
+	key->key = perf_evsel__intval(evsel, sample, "code");
+	key->exit_reasons = sie_icpt_prog_codes;
+}
+
+static struct child_event_ops child_events[] = {
+	{ .name = "kvm:kvm_s390_intercept_instruction",
+	  .get_key = event_icpt_insn_get_key },
+	{ .name = "kvm:kvm_s390_handle_sigp",
+	  .get_key = event_sigp_get_key },
+	{ .name = "kvm:kvm_s390_handle_diag",
+	  .get_key = event_diag_get_key },
+	{ .name = "kvm:kvm_s390_intercept_prog",
+	  .get_key = event_icpt_prog_get_key },
+	{ NULL, NULL },
+};
+
+static struct kvm_events_ops exit_events = {
+	.is_begin_event = exit_event_begin,
+	.is_end_event = exit_event_end,
+	.child_ops = child_events,
+	.decode_key = exit_event_decode_key,
+	.name = "VM-EXIT"
+};
+
+const char * const kvm_events_tp[] = {
+	"kvm:kvm_s390_sie_enter",
+	"kvm:kvm_s390_sie_exit",
+	"kvm:kvm_s390_intercept_instruction",
+	"kvm:kvm_s390_handle_sigp",
+	"kvm:kvm_s390_handle_diag",
+	"kvm:kvm_s390_intercept_prog",
+	NULL,
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+	{ .name = "vmexit", .ops = &exit_events },
+	{ NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+	"Wait state",
+	NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+	if (strstr(cpuid, "IBM/S390")) {
+		kvm->exit_reasons = sie_exit_reasons;
+		kvm->exit_reasons_isa = "SIE";
+	} else {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 1641542..9b21881 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -15,3 +15,5 @@
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o
 LIB_H += arch/$(ARCH)/util/tsc.h
+HAVE_KVM_STAT_SUPPORT := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/kvm-stat.o
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 9f89f89..d8bbf7a 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
 #include "thread.h"
 #include "map.h"
 #include "event.h"
+#include "debug.h"
 #include "tests/tests.h"
 
 #define STACK_SIZE 8192
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
new file mode 100644
index 0000000..14e4e66
--- /dev/null
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -0,0 +1,156 @@
+#include "../../util/kvm-stat.h"
+#include <asm/kvm_perf.h>
+
+define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
+define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
+
+static struct kvm_events_ops exit_events = {
+	.is_begin_event = exit_event_begin,
+	.is_end_event = exit_event_end,
+	.decode_key = exit_event_decode_key,
+	.name = "VM-EXIT"
+};
+
+/*
+ * For the mmio events, we treat:
+ * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
+ * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
+ */
+static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
+			       struct event_key *key)
+{
+	key->key  = perf_evsel__intval(evsel, sample, "gpa");
+	key->info = perf_evsel__intval(evsel, sample, "type");
+}
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+static bool mmio_event_begin(struct perf_evsel *evsel,
+			     struct perf_sample *sample, struct event_key *key)
+{
+	/* MMIO read begin event in kernel. */
+	if (kvm_exit_event(evsel))
+		return true;
+
+	/* MMIO write begin event in kernel. */
+	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
+		mmio_event_get_key(evsel, sample, key);
+		return true;
+	}
+
+	return false;
+}
+
+static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
+			   struct event_key *key)
+{
+	/* MMIO write end event in kernel. */
+	if (kvm_entry_event(evsel))
+		return true;
+
+	/* MMIO read end event in kernel. */
+	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
+		mmio_event_get_key(evsel, sample, key);
+		return true;
+	}
+
+	return false;
+}
+
+static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+				  struct event_key *key,
+				  char *decode)
+{
+	scnprintf(decode, DECODE_STR_LEN, "%#lx:%s",
+		  (unsigned long)key->key,
+		  key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
+}
+
+static struct kvm_events_ops mmio_events = {
+	.is_begin_event = mmio_event_begin,
+	.is_end_event = mmio_event_end,
+	.decode_key = mmio_event_decode_key,
+	.name = "MMIO Access"
+};
+
+/* The time of an emulated pio access is from kvm_pio to kvm_entry. */
+static void ioport_event_get_key(struct perf_evsel *evsel,
+				 struct perf_sample *sample,
+				 struct event_key *key)
+{
+	key->key  = perf_evsel__intval(evsel, sample, "port");
+	key->info = perf_evsel__intval(evsel, sample, "rw");
+}
+
+static bool ioport_event_begin(struct perf_evsel *evsel,
+			       struct perf_sample *sample,
+			       struct event_key *key)
+{
+	if (!strcmp(evsel->name, "kvm:kvm_pio")) {
+		ioport_event_get_key(evsel, sample, key);
+		return true;
+	}
+
+	return false;
+}
+
+static bool ioport_event_end(struct perf_evsel *evsel,
+			     struct perf_sample *sample __maybe_unused,
+			     struct event_key *key __maybe_unused)
+{
+	return kvm_entry_event(evsel);
+}
+
+static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+				    struct event_key *key,
+				    char *decode)
+{
+	scnprintf(decode, DECODE_STR_LEN, "%#llx:%s",
+		  (unsigned long long)key->key,
+		  key->info ? "POUT" : "PIN");
+}
+
+static struct kvm_events_ops ioport_events = {
+	.is_begin_event = ioport_event_begin,
+	.is_end_event = ioport_event_end,
+	.decode_key = ioport_event_decode_key,
+	.name = "IO Port Access"
+};
+
+const char * const kvm_events_tp[] = {
+	"kvm:kvm_entry",
+	"kvm:kvm_exit",
+	"kvm:kvm_mmio",
+	"kvm:kvm_pio",
+	NULL,
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+	{ .name = "vmexit", .ops = &exit_events },
+	{ .name = "mmio", .ops = &mmio_events },
+	{ .name = "ioport", .ops = &ioport_events },
+	{ NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+	"HLT",
+	NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+	if (strstr(cpuid, "Intel")) {
+		kvm->exit_reasons = vmx_exit_reasons;
+		kvm->exit_reasons_isa = "VMX";
+	} else if (strstr(cpuid, "AMD")) {
+		kvm->exit_reasons = svm_exit_reasons;
+		kvm->exit_reasons_isa = "SVM";
+	} else {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
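
Taken together with the kvm_events_tp[] table above, a typical x86 session looks like (the traced command is illustrative):

  $ perf kvm stat record sleep 10
  $ perf kvm stat report --event=mmio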
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 40021fa..fd28684 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -6,29 +6,9 @@
 #include "../../perf.h"
 #include <linux/types.h>
 #include "../../util/debug.h"
+#include "../../util/tsc.h"
 #include "tsc.h"
 
-u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
-{
-	u64 t, quot, rem;
-
-	t = ns - tc->time_zero;
-	quot = t / tc->time_mult;
-	rem  = t % tc->time_mult;
-	return (quot << tc->time_shift) +
-	       (rem << tc->time_shift) / tc->time_mult;
-}
-
-u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
-{
-	u64 quot, rem;
-
-	quot = cyc >> tc->time_shift;
-	rem  = cyc & ((1 << tc->time_shift) - 1);
-	return tc->time_zero + quot * tc->time_mult +
-	       ((rem * tc->time_mult) >> tc->time_shift);
-}
-
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 			     struct perf_tsc_conversion *tc)
 {
@@ -57,3 +37,12 @@
 
 	return 0;
 }
+
+u64 rdtsc(void)
+{
+	unsigned int low, high;
+
+	asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+	return low | ((u64)high) << 32;
+}
diff --git a/tools/perf/arch/x86/util/tsc.h b/tools/perf/arch/x86/util/tsc.h
index 2affe03..2edc4d3 100644
--- a/tools/perf/arch/x86/util/tsc.h
+++ b/tools/perf/arch/x86/util/tsc.h
@@ -14,7 +14,4 @@
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 			     struct perf_tsc_conversion *tc);
 
-u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
-u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
-
 #endif /* TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ */
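
The conversions being moved to generic code keep the identity from the removed x86 implementation, where mult and shift come from struct perf_tsc_conversion:

	ns = time_zero + ((cyc >> shift) * mult)
	               + (((cyc & ((1 << shift) - 1)) * mult) >> shift)

i.e. cycles are scaled by mult / 2^shift, with the remainder handled separately to avoid 64-bit overflow.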
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 3261f68..db25e93 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -3,6 +3,7 @@
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
+#include "../../util/debug.h"
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
 int libunwind__arch_reg_id(int regnum)
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index eba46709..3c4dd44 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -43,5 +43,6 @@
 #define BENCH_FORMAT_UNKNOWN		-1
 
 extern int bench_format;
+extern unsigned int bench_repeat;
 
 #endif
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index a1625587..732403b 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -29,13 +29,6 @@
  */
 static unsigned int nrequeue = 1;
 
-/*
- * There can be significant variance from run to run,
- * the more repeats, the more exact the overall avg and
- * the better idea of the futex latency.
- */
-static unsigned int repeat = 10;
-
 static pthread_t *worker;
 static bool done = 0, silent = 0;
 static pthread_mutex_t thread_lock;
@@ -46,7 +39,6 @@
 static const struct option options[] = {
 	OPT_UINTEGER('t', "threads",  &nthreads, "Specify amount of threads"),
 	OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
-	OPT_UINTEGER('r', "repeat",   &repeat,   "Specify amount of times to repeat the run"),
 	OPT_BOOLEAN( 's', "silent",   &silent,   "Silent mode: do not display data/details"),
 	OPT_END()
 };
@@ -146,7 +138,7 @@
 	pthread_cond_init(&thread_parent, NULL);
 	pthread_cond_init(&thread_worker, NULL);
 
-	for (j = 0; j < repeat && !done; j++) {
+	for (j = 0; j < bench_repeat && !done; j++) {
 		unsigned int nrequeued = 0;
 		struct timeval start, end, runtime;
 
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index d096169..50022cb 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -30,15 +30,8 @@
  */
 static unsigned int nwakes = 1;
 
-/*
- * There can be significant variance from run to run,
- * the more repeats, the more exact the overall avg and
- * the better idea of the futex latency.
- */
-static unsigned int repeat = 10;
-
 pthread_t *worker;
-static bool done = 0, silent = 0;
+static bool done = false, silent = false;
 static pthread_mutex_t thread_lock;
 static pthread_cond_t thread_parent, thread_worker;
 static struct stats waketime_stats, wakeup_stats;
@@ -47,7 +40,6 @@
 static const struct option options[] = {
 	OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
 	OPT_UINTEGER('w', "nwakes",  &nwakes,   "Specify amount of threads to wake at once"),
-	OPT_UINTEGER('r', "repeat",  &repeat,   "Specify amount of times to repeat the run"),
 	OPT_BOOLEAN( 's', "silent",  &silent,   "Silent mode: do not display data/details"),
 	OPT_END()
 };
@@ -149,7 +141,7 @@
 	pthread_cond_init(&thread_parent, NULL);
 	pthread_cond_init(&thread_worker, NULL);
 
-	for (j = 0; j < repeat && !done; j++) {
+	for (j = 0; j < bench_repeat && !done; j++) {
 		unsigned int nwoken = 0;
 		struct timeval start, end, runtime;
 
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 5ce71d3..2465141 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -10,6 +10,7 @@
 #include "../util/util.h"
 #include "../util/parse-options.h"
 #include "../util/header.h"
+#include "../util/cloexec.h"
 #include "bench.h"
 #include "mem-memcpy-arch.h"
 
@@ -83,7 +84,8 @@
 
 static void init_cycle(void)
 {
-	cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
+	cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
+				       perf_event_open_cloexec_flag());
 
 	if (cycle_fd < 0 && errno == ENOSYS)
 		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
@@ -189,6 +191,11 @@
 	argc = parse_options(argc, argv, options,
 			     bench_mem_memcpy_usage, 0);
 
+	if (no_prefault && only_prefault) {
+		fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
+		return 1;
+	}
+
 	if (use_cycle)
 		init_cycle();
 
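Both memcpy and memset benchmarks now pass perf_event_open_cloexec_flag() instead of a hard-coded 0. The helper lives in util/cloexec.c, which this series doesn't show here; a plausible sketch, assuming a probe-once-and-cache design where an EINVAL from an older kernel drops the flag:

	#include <errno.h>
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef PERF_FLAG_FD_CLOEXEC
	#define PERF_FLAG_FD_CLOEXEC	(1UL << 3)	/* new in v3.14 */
	#endif

	unsigned long perf_event_open_cloexec_flag(void)
	{
		static int probed;
		static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_SOFTWARE,
			.config		= PERF_COUNT_SW_CPU_CLOCK,
			.size		= sizeof(attr),
			.exclude_kernel	= 1,
		};
		int fd;

		if (probed)
			return flag;
		probed = 1;

		/* hypothetical probe: open a throwaway event with the
		 * flag; EINVAL means the kernel predates it */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
			     PERF_FLAG_FD_CLOEXEC);
		if (fd >= 0)
			close(fd);
		else if (errno == EINVAL)
			flag = 0;

		return flag;
	}

The payoff is that the benchmark fds are close-on-exec without a racy separate fcntl(F_SETFD) call.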
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
index 9af79d2..75fc3e6 100644
--- a/tools/perf/bench/mem-memset.c
+++ b/tools/perf/bench/mem-memset.c
@@ -10,6 +10,7 @@
 #include "../util/util.h"
 #include "../util/parse-options.h"
 #include "../util/header.h"
+#include "../util/cloexec.h"
 #include "bench.h"
 #include "mem-memset-arch.h"
 
@@ -83,7 +84,8 @@
 
 static void init_cycle(void)
 {
-	cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
+	cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
+				       perf_event_open_cloexec_flag());
 
 	if (cycle_fd < 0 && errno == ENOSYS)
 		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
@@ -181,6 +183,11 @@
 	argc = parse_options(argc, argv, options,
 			     bench_mem_memset_usage, 0);
 
+	if (no_prefault && only_prefault) {
+		fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
+		return 1;
+	}
+
 	if (use_cycle)
 		init_cycle();
 
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index cc1190a..52a5659 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -28,6 +28,7 @@
 #include <sys/time.h>
 #include <sys/poll.h>
 #include <limits.h>
+#include <err.h>
 
 #define DATASIZE 100
 
@@ -50,12 +51,6 @@
 	int wakefd;
 };
 
-static void barf(const char *msg)
-{
-	fprintf(stderr, "%s (error: %s)\n", msg, strerror(errno));
-	exit(1);
-}
-
 static void fdpair(int fds[2])
 {
 	if (use_pipes) {
@@ -66,7 +61,7 @@
 			return;
 	}
 
-	barf(use_pipes ? "pipe()" : "socketpair()");
+	err(EXIT_FAILURE, use_pipes ? "pipe()" : "socketpair()");
 }
 
 /* Block until we're ready to go */
@@ -77,11 +72,11 @@
 
 	/* Tell them we're ready. */
 	if (write(ready_out, &dummy, 1) != 1)
-		barf("CLIENT: ready write");
+		err(EXIT_FAILURE, "CLIENT: ready write");
 
 	/* Wait for "GO" signal */
 	if (poll(&pollfd, 1, -1) != 1)
-		barf("poll");
+		err(EXIT_FAILURE, "poll");
 }
 
 /* Sender sprays loops messages down each file descriptor */
@@ -101,7 +96,7 @@
 			ret = write(ctx->out_fds[j], data + done,
 				    sizeof(data)-done);
 			if (ret < 0)
-				barf("SENDER: write");
+				err(EXIT_FAILURE, "SENDER: write");
 			done += ret;
 			if (done < DATASIZE)
 				goto again;
@@ -131,7 +126,7 @@
 again:
 		ret = read(ctx->in_fds[0], data + done, DATASIZE - done);
 		if (ret < 0)
-			barf("SERVER: read");
+			err(EXIT_FAILURE, "SERVER: read");
 		done += ret;
 		if (done < DATASIZE)
 			goto again;
@@ -144,14 +139,14 @@
 {
 	pthread_attr_t attr;
 	pthread_t childid;
-	int err;
+	int ret;
 
 	if (!thread_mode) {
 		/* process mode */
 		/* Fork the receiver. */
 		switch (fork()) {
 		case -1:
-			barf("fork()");
+			err(EXIT_FAILURE, "fork()");
 			break;
 		case 0:
 			(*func) (ctx);
@@ -165,19 +160,17 @@
 	}
 
 	if (pthread_attr_init(&attr) != 0)
-		barf("pthread_attr_init:");
+		err(EXIT_FAILURE, "pthread_attr_init:");
 
 #ifndef __ia64__
 	if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
-		barf("pthread_attr_setstacksize");
+		err(EXIT_FAILURE, "pthread_attr_setstacksize");
 #endif
 
-	err = pthread_create(&childid, &attr, func, ctx);
-	if (err != 0) {
-		fprintf(stderr, "pthread_create failed: %s (%d)\n",
-			strerror(err), err);
-		exit(-1);
-	}
+	ret = pthread_create(&childid, &attr, func, ctx);
+	if (ret != 0)
+		err(EXIT_FAILURE, "pthread_create failed");
+
 	return childid;
 }
 
@@ -207,14 +200,14 @@
 			+ num_fds * sizeof(int));
 
 	if (!snd_ctx)
-		barf("malloc()");
+		err(EXIT_FAILURE, "malloc()");
 
 	for (i = 0; i < num_fds; i++) {
 		int fds[2];
 		struct receiver_context *ctx = malloc(sizeof(*ctx));
 
 		if (!ctx)
-			barf("malloc()");
+			err(EXIT_FAILURE, "malloc()");
 
 
 		/* Create the pipe between client and server */
@@ -281,7 +274,7 @@
 
 	pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t));
 	if (!pth_tab)
-		barf("main:malloc()");
+		err(EXIT_FAILURE, "main:malloc()");
 
 	fdpair(readyfds);
 	fdpair(wakefds);
@@ -294,13 +287,13 @@
 	/* Wait for everyone to be ready */
 	for (i = 0; i < total_children; i++)
 		if (read(readyfds[0], &dummy, 1) != 1)
-			barf("Reading for readyfds");
+			err(EXIT_FAILURE, "Reading for readyfds");
 
 	gettimeofday(&start, NULL);
 
 	/* Kick them off */
 	if (write(wakefds[1], &dummy, 1) != 1)
-		barf("Writing to start them");
+		err(EXIT_FAILURE, "Writing to start them");
 
 	/* Reap them all */
 	for (i = 0; i < total_children; i++)
@@ -332,5 +325,7 @@
 		break;
 	}
 
+	free(pth_tab);
+
 	return 0;
 }
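Every barf() call above becomes err(3) from <err.h>, which folds the fprintf + strerror(errno) + exit sequence into one libc call and adds the program-name prefix for free. A minimal illustration:

	#include <err.h>
	#include <fcntl.h>
	#include <stdlib.h>

	int main(void)
	{
		int fd = open("/no/such/file", O_RDONLY);

		/* prints e.g. "demo: open /no/such/file: No such file
		 * or directory" and exits with EXIT_FAILURE */
		if (fd < 0)
			err(EXIT_FAILURE, "open /no/such/file");

		return 0;
	}

One caveat visible in the pthread_create() hunk: pthread_create() returns its error code rather than setting errno, so err() there may report a stale errno; errx() or assigning errno = ret first would be more precise.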
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 1e6e777..b9a56fa 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -104,9 +104,11 @@
 
 /* Output/formatting style, exported to benchmark modules: */
 int bench_format = BENCH_FORMAT_DEFAULT;
+unsigned int bench_repeat = 10; /* default number of times to repeat the run */
 
 static const struct option bench_options[] = {
 	OPT_STRING('f', "format", &bench_format_str, "default", "Specify format style"),
+	OPT_UINTEGER('r', "repeat",  &bench_repeat,   "Specify amount of times to repeat the run"),
 	OPT_END()
 };
 
@@ -226,6 +228,11 @@
 		goto end;
 	}
 
+	if (bench_repeat == 0) {
+		printf("Invalid repeat option: Must specify a positive value\n");
+		goto end;
+	}
+
 	if (argc < 1) {
 		print_usage();
 		goto end;
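Note the design change here: -r/--repeat moves from the individual futex benchmarks into the common bench_options, so every module that loops on bench_repeat (futex-requeue and futex-wake above) honors one global flag, e.g. `perf bench -r 1000 futex wake`, and a bogus `-r 0` is rejected once, before any benchmark runs.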
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index b22dbb1..2a2c78f 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -125,7 +125,8 @@
 	return ret;
 }
 
-static int build_id_cache__add_kcore(const char *filename, const char *debugdir)
+static int build_id_cache__add_kcore(const char *filename, const char *debugdir,
+				     bool force)
 {
 	char dir[32], sbuildid[BUILD_ID_SIZE * 2 + 1];
 	char from_dir[PATH_MAX], to_dir[PATH_MAX];
@@ -144,7 +145,8 @@
 	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
 		  debugdir, sbuildid);
 
-	if (!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
+	if (!force &&
+	    !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
 		pr_debug("same kcore found in %s\n", to_dir);
 		return 0;
 	}
@@ -389,7 +391,7 @@
 	}
 
 	if (kcore_filename &&
-	    build_id_cache__add_kcore(kcore_filename, debugdir))
+	    build_id_cache__add_kcore(kcore_filename, debugdir, force))
 		pr_warning("Couldn't add %s\n", kcore_filename);
 
 	return ret;
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index c99e0de..66e12f5 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -15,6 +15,7 @@
 #include "util/parse-options.h"
 #include "util/session.h"
 #include "util/data.h"
+#include "util/debug.h"
 
 static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
 {
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 178b88a..0384d93 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -11,6 +11,7 @@
 #include "util/parse-options.h"
 #include "util/run-command.h"
 #include "util/help.h"
+#include "util/debug.h"
 
 static struct man_viewer_list {
 	struct man_viewer_list *next;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 16c7c11..9a02807 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -389,6 +389,9 @@
 	ret = perf_session__process_events(session, &inject->tool);
 
 	if (!file_out->is_pipe) {
+		if (inject->build_ids)
+			perf_header__set_feat(&session->header,
+					      HEADER_BUILD_ID);
 		session->header.data_size = inject->bytes_written;
 		perf_session__write_header(session, session->evlist, file_out->fd, true);
 	}
@@ -436,6 +439,8 @@
 			    "where and how long tasks slept"),
 		OPT_INCR('v', "verbose", &verbose,
 			 "be more verbose (show build ids, etc)"),
+		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
+			   "kallsyms pathname"),
 		OPT_END()
 	};
 	const char * const inject_usage[] = {
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 0f1e5a2..43367eb 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -29,114 +29,25 @@
 #include <pthread.h>
 #include <math.h>
 
-#if defined(__i386__) || defined(__x86_64__)
-#include <asm/svm.h>
-#include <asm/vmx.h>
-#include <asm/kvm.h>
+#ifdef HAVE_KVM_STAT_SUPPORT
+#include <asm/kvm_perf.h>
+#include "util/kvm-stat.h"
 
-struct event_key {
-	#define INVALID_KEY     (~0ULL)
-	u64 key;
-	int info;
-};
-
-struct kvm_event_stats {
-	u64 time;
-	struct stats stats;
-};
-
-struct kvm_event {
-	struct list_head hash_entry;
-	struct rb_node rb;
-
-	struct event_key key;
-
-	struct kvm_event_stats total;
-
-	#define DEFAULT_VCPU_NUM 8
-	int max_vcpu;
-	struct kvm_event_stats *vcpu;
-};
-
-typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
-
-struct kvm_event_key {
-	const char *name;
-	key_cmp_fun key;
-};
-
-
-struct perf_kvm_stat;
-
-struct kvm_events_ops {
-	bool (*is_begin_event)(struct perf_evsel *evsel,
-			       struct perf_sample *sample,
-			       struct event_key *key);
-	bool (*is_end_event)(struct perf_evsel *evsel,
-			     struct perf_sample *sample, struct event_key *key);
-	void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
-			   char decode[20]);
-	const char *name;
-};
-
-struct exit_reasons_table {
-	unsigned long exit_code;
-	const char *reason;
-};
-
-#define EVENTS_BITS		12
-#define EVENTS_CACHE_SIZE	(1UL << EVENTS_BITS)
-
-struct perf_kvm_stat {
-	struct perf_tool    tool;
-	struct record_opts  opts;
-	struct perf_evlist  *evlist;
-	struct perf_session *session;
-
-	const char *file_name;
-	const char *report_event;
-	const char *sort_key;
-	int trace_vcpu;
-
-	struct exit_reasons_table *exit_reasons;
-	int exit_reasons_size;
-	const char *exit_reasons_isa;
-
-	struct kvm_events_ops *events_ops;
-	key_cmp_fun compare;
-	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
-
-	u64 total_time;
-	u64 total_count;
-	u64 lost_events;
-	u64 duration;
-
-	const char *pid_str;
-	struct intlist *pid_list;
-
-	struct rb_root result;
-
-	int timerfd;
-	unsigned int display_time;
-	bool live;
-};
-
-
-static void exit_event_get_key(struct perf_evsel *evsel,
-			       struct perf_sample *sample,
-			       struct event_key *key)
+void exit_event_get_key(struct perf_evsel *evsel,
+			struct perf_sample *sample,
+			struct event_key *key)
 {
 	key->info = 0;
-	key->key = perf_evsel__intval(evsel, sample, "exit_reason");
+	key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);
 }
 
-static bool kvm_exit_event(struct perf_evsel *evsel)
+bool kvm_exit_event(struct perf_evsel *evsel)
 {
-	return !strcmp(evsel->name, "kvm:kvm_exit");
+	return !strcmp(evsel->name, KVM_EXIT_TRACE);
 }
 
-static bool exit_event_begin(struct perf_evsel *evsel,
-			     struct perf_sample *sample, struct event_key *key)
+bool exit_event_begin(struct perf_evsel *evsel,
+		      struct perf_sample *sample, struct event_key *key)
 {
 	if (kvm_exit_event(evsel)) {
 		exit_event_get_key(evsel, sample, key);
@@ -146,32 +57,23 @@
 	return false;
 }
 
-static bool kvm_entry_event(struct perf_evsel *evsel)
+bool kvm_entry_event(struct perf_evsel *evsel)
 {
-	return !strcmp(evsel->name, "kvm:kvm_entry");
+	return !strcmp(evsel->name, KVM_ENTRY_TRACE);
 }
 
-static bool exit_event_end(struct perf_evsel *evsel,
-			   struct perf_sample *sample __maybe_unused,
-			   struct event_key *key __maybe_unused)
+bool exit_event_end(struct perf_evsel *evsel,
+		    struct perf_sample *sample __maybe_unused,
+		    struct event_key *key __maybe_unused)
 {
 	return kvm_entry_event(evsel);
 }
 
-static struct exit_reasons_table vmx_exit_reasons[] = {
-	VMX_EXIT_REASONS
-};
-
-static struct exit_reasons_table svm_exit_reasons[] = {
-	SVM_EXIT_REASONS
-};
-
-static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
+static const char *get_exit_reason(struct perf_kvm_stat *kvm,
+				   struct exit_reasons_table *tbl,
+				   u64 exit_code)
 {
-	int i = kvm->exit_reasons_size;
-	struct exit_reasons_table *tbl = kvm->exit_reasons;
-
-	while (i--) {
+	while (tbl->reason != NULL) {
 		if (tbl->exit_code == exit_code)
 			return tbl->reason;
 		tbl++;
@@ -182,146 +84,28 @@
 	return "UNKNOWN";
 }
 
-static void exit_event_decode_key(struct perf_kvm_stat *kvm,
-				  struct event_key *key,
-				  char decode[20])
+void exit_event_decode_key(struct perf_kvm_stat *kvm,
+			   struct event_key *key,
+			   char *decode)
 {
-	const char *exit_reason = get_exit_reason(kvm, key->key);
+	const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
+						  key->key);
 
-	scnprintf(decode, 20, "%s", exit_reason);
+	scnprintf(decode, DECODE_STR_LEN, "%s", exit_reason);
 }
 
-static struct kvm_events_ops exit_events = {
-	.is_begin_event = exit_event_begin,
-	.is_end_event = exit_event_end,
-	.decode_key = exit_event_decode_key,
-	.name = "VM-EXIT"
-};
-
-/*
- * For the mmio events, we treat:
- * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
- * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
- */
-static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
-			       struct event_key *key)
-{
-	key->key  = perf_evsel__intval(evsel, sample, "gpa");
-	key->info = perf_evsel__intval(evsel, sample, "type");
-}
-
-#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
-#define KVM_TRACE_MMIO_READ 1
-#define KVM_TRACE_MMIO_WRITE 2
-
-static bool mmio_event_begin(struct perf_evsel *evsel,
-			     struct perf_sample *sample, struct event_key *key)
-{
-	/* MMIO read begin event in kernel. */
-	if (kvm_exit_event(evsel))
-		return true;
-
-	/* MMIO write begin event in kernel. */
-	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
-	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
-		mmio_event_get_key(evsel, sample, key);
-		return true;
-	}
-
-	return false;
-}
-
-static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
-			   struct event_key *key)
-{
-	/* MMIO write end event in kernel. */
-	if (kvm_entry_event(evsel))
-		return true;
-
-	/* MMIO read end event in kernel.*/
-	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
-	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
-		mmio_event_get_key(evsel, sample, key);
-		return true;
-	}
-
-	return false;
-}
-
-static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
-				  struct event_key *key,
-				  char decode[20])
-{
-	scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
-				key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
-}
-
-static struct kvm_events_ops mmio_events = {
-	.is_begin_event = mmio_event_begin,
-	.is_end_event = mmio_event_end,
-	.decode_key = mmio_event_decode_key,
-	.name = "MMIO Access"
-};
-
- /* The time of emulation pio access is from kvm_pio to kvm_entry. */
-static void ioport_event_get_key(struct perf_evsel *evsel,
-				 struct perf_sample *sample,
-				 struct event_key *key)
-{
-	key->key  = perf_evsel__intval(evsel, sample, "port");
-	key->info = perf_evsel__intval(evsel, sample, "rw");
-}
-
-static bool ioport_event_begin(struct perf_evsel *evsel,
-			       struct perf_sample *sample,
-			       struct event_key *key)
-{
-	if (!strcmp(evsel->name, "kvm:kvm_pio")) {
-		ioport_event_get_key(evsel, sample, key);
-		return true;
-	}
-
-	return false;
-}
-
-static bool ioport_event_end(struct perf_evsel *evsel,
-			     struct perf_sample *sample __maybe_unused,
-			     struct event_key *key __maybe_unused)
-{
-	return kvm_entry_event(evsel);
-}
-
-static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
-				    struct event_key *key,
-				    char decode[20])
-{
-	scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
-				key->info ? "POUT" : "PIN");
-}
-
-static struct kvm_events_ops ioport_events = {
-	.is_begin_event = ioport_event_begin,
-	.is_end_event = ioport_event_end,
-	.decode_key = ioport_event_decode_key,
-	.name = "IO Port Access"
-};
-
 static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
 {
-	bool ret = true;
+	struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
 
-	if (!strcmp(kvm->report_event, "vmexit"))
-		kvm->events_ops = &exit_events;
-	else if (!strcmp(kvm->report_event, "mmio"))
-		kvm->events_ops = &mmio_events;
-	else if (!strcmp(kvm->report_event, "ioport"))
-		kvm->events_ops = &ioport_events;
-	else {
-		pr_err("Unknown report event:%s\n", kvm->report_event);
-		ret = false;
+	for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
+		if (!strcmp(events_ops->name, kvm->report_event)) {
+			kvm->events_ops = events_ops->ops;
+			return true;
+		}
 	}
 
-	return ret;
+	return false;
 }
 
 struct vcpu_event_record {
@@ -477,6 +261,54 @@
 	return true;
 }
 
+static bool is_child_event(struct perf_kvm_stat *kvm,
+			   struct perf_evsel *evsel,
+			   struct perf_sample *sample,
+			   struct event_key *key)
+{
+	struct child_event_ops *child_ops;
+
+	child_ops = kvm->events_ops->child_ops;
+
+	if (!child_ops)
+		return false;
+
+	for (; child_ops->name; child_ops++) {
+		if (!strcmp(evsel->name, child_ops->name)) {
+			child_ops->get_key(evsel, sample, key);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool handle_child_event(struct perf_kvm_stat *kvm,
+			       struct vcpu_event_record *vcpu_record,
+			       struct event_key *key,
+			       struct perf_sample *sample __maybe_unused)
+{
+	struct kvm_event *event = NULL;
+
+	if (key->key != INVALID_KEY)
+		event = find_create_kvm_event(kvm, key);
+
+	vcpu_record->last_event = event;
+
+	return true;
+}
+
+static bool skip_event(const char *event)
+{
+	const char * const *skip_events;
+
+	for (skip_events = kvm_skip_events; *skip_events; skip_events++)
+		if (!strcmp(event, *skip_events))
+			return true;
+
+	return false;
+}
+
 static bool handle_end_event(struct perf_kvm_stat *kvm,
 			     struct vcpu_event_record *vcpu_record,
 			     struct event_key *key,
@@ -525,10 +357,10 @@
 	time_diff = sample->time - time_begin;
 
 	if (kvm->duration && time_diff > kvm->duration) {
-		char decode[32];
+		char decode[DECODE_STR_LEN];
 
 		kvm->events_ops->decode_key(kvm, &event->key, decode);
-		if (strcmp(decode, "HLT")) {
+		if (!skip_event(decode)) {
 			pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
 				 sample->time, sample->pid, vcpu_record->vcpu_id,
 				 decode, time_diff/1000);
@@ -553,7 +385,7 @@
 			return NULL;
 		}
 
-		vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
+		vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID);
 		thread->priv = vcpu_record;
 	}
 
@@ -566,7 +398,8 @@
 			     struct perf_sample *sample)
 {
 	struct vcpu_event_record *vcpu_record;
-	struct event_key key = {.key = INVALID_KEY};
+	struct event_key key = { .key = INVALID_KEY,
+				 .exit_reasons = kvm->exit_reasons };
 
 	vcpu_record = per_vcpu_record(thread, evsel, sample);
 	if (!vcpu_record)
@@ -580,6 +413,9 @@
 	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
 		return handle_begin_event(kvm, vcpu_record, &key, sample->time);
 
+	if (is_child_event(kvm, evsel, sample, &key))
+		return handle_child_event(kvm, vcpu_record, &key, sample);
+
 	if (kvm->events_ops->is_end_event(evsel, sample, &key))
 		return handle_end_event(kvm, vcpu_record, &key, sample);
 
@@ -740,7 +576,7 @@
 
 static void print_result(struct perf_kvm_stat *kvm)
 {
-	char decode[20];
+	char decode[DECODE_STR_LEN];
 	struct kvm_event *event;
 	int vcpu = kvm->trace_vcpu;
 
@@ -751,7 +587,7 @@
 
 	pr_info("\n\n");
 	print_vcpu_info(kvm);
-	pr_info("%20s ", kvm->events_ops->name);
+	pr_info("%*s ", DECODE_STR_LEN, kvm->events_ops->name);
 	pr_info("%10s ", "Samples");
 	pr_info("%9s ", "Samples%");
 
@@ -770,7 +606,7 @@
 		min = get_event_min(event, vcpu);
 
 		kvm->events_ops->decode_key(kvm, &event->key, decode);
-		pr_info("%20s ", decode);
+		pr_info("%*s ", DECODE_STR_LEN, decode);
 		pr_info("%10llu ", (unsigned long long)ecount);
 		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
 		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
@@ -839,34 +675,28 @@
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
 {
 	char buf[64], *cpuid;
-	int err, isa;
+	int err;
 
 	if (kvm->live) {
 		err = get_cpuid(buf, sizeof(buf));
 		if (err != 0) {
-			pr_err("Failed to look up CPU type (Intel or AMD)\n");
+			pr_err("Failed to look up CPU type\n");
 			return err;
 		}
 		cpuid = buf;
 	} else
 		cpuid = kvm->session->header.env.cpuid;
 
-	if (strstr(cpuid, "Intel"))
-		isa = 1;
-	else if (strstr(cpuid, "AMD"))
-		isa = 0;
-	else {
+	if (!cpuid) {
+		pr_err("Failed to look up CPU type\n");
+		return -EINVAL;
+	}
+
+	err = cpu_isa_init(kvm, cpuid);
+	if (err == -ENOTSUP)
 		pr_err("CPU %s is not supported.\n", cpuid);
-		return -ENOTSUP;
-	}
 
-	if (isa == 1) {
-		kvm->exit_reasons = vmx_exit_reasons;
-		kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
-		kvm->exit_reasons_isa = "VMX";
-	}
-
-	return 0;
+	return err;
 }
 
 static bool verify_vcpu(int vcpu)
@@ -1300,13 +1130,6 @@
 	return ret;
 }
 
-static const char * const kvm_events_tp[] = {
-	"kvm:kvm_entry",
-	"kvm:kvm_exit",
-	"kvm:kvm_mmio",
-	"kvm:kvm_pio",
-};
-
 #define STRDUP_FAIL_EXIT(s)		\
 	({	char *_p;		\
 	_p = strdup(s);		\
@@ -1318,7 +1141,7 @@
 static int
 kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
-	unsigned int rec_argc, i, j;
+	unsigned int rec_argc, i, j, events_tp_size;
 	const char **rec_argv;
 	const char * const record_args[] = {
 		"record",
@@ -1326,9 +1149,14 @@
 		"-m", "1024",
 		"-c", "1",
 	};
+	const char * const *events_tp;
+	events_tp_size = 0;
+
+	for (events_tp = kvm_events_tp; *events_tp; events_tp++)
+		events_tp_size++;
 
 	rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
-		   2 * ARRAY_SIZE(kvm_events_tp);
+		   2 * events_tp_size;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
 	if (rec_argv == NULL)
@@ -1337,7 +1165,7 @@
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
 		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
 
-	for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+	for (j = 0; j < events_tp_size; j++) {
 		rec_argv[i++] = "-e";
 		rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
 	}
@@ -1356,7 +1184,8 @@
 {
 	const struct option kvm_events_report_options[] = {
 		OPT_STRING(0, "event", &kvm->report_event, "report event",
-			    "event for reporting: vmexit, mmio, ioport"),
+			   "event for reporting: vmexit, "
+			   "mmio (x86 only), ioport (x86 only)"),
 		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
 			    "vcpu id to report"),
 		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
@@ -1391,16 +1220,16 @@
 {
 	struct perf_evlist *evlist;
 	char *tp, *name, *sys;
-	unsigned int j;
 	int err = -1;
+	const char * const *events_tp;
 
 	evlist = perf_evlist__new();
 	if (evlist == NULL)
 		return NULL;
 
-	for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+	for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
 
-		tp = strdup(kvm_events_tp[j]);
+		tp = strdup(*events_tp);
 		if (tp == NULL)
 			goto out;
 
@@ -1409,7 +1238,7 @@
 		name = strchr(tp, ':');
 		if (name == NULL) {
 			pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
-				kvm_events_tp[j]);
+			       *events_tp);
 			free(tp);
 			goto out;
 		}
@@ -1417,7 +1246,7 @@
 		name++;
 
 		if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
-			pr_err("Failed to add %s tracepoint to the list\n", kvm_events_tp[j]);
+			pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
 			free(tp);
 			goto out;
 		}
@@ -1462,7 +1291,9 @@
 			"key for sorting: sample(sort by samples number)"
 			" time (sort by avg time)"),
 		OPT_U64(0, "duration", &kvm->duration,
-		    "show events other than HALT that take longer than duration usecs"),
+			"show events other than"
+			" HLT (x86 only) or Wait state (s390 only)"
+			" that take longer than duration usecs"),
 		OPT_END()
 	};
 	const char * const live_usage[] = {
@@ -1585,9 +1416,6 @@
 		.report_event	= "vmexit",
 		.sort_key	= "sample",
 
-		.exit_reasons = svm_exit_reasons,
-		.exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
-		.exit_reasons_isa = "SVM",
 	};
 
 	if (argc == 1) {
@@ -1609,7 +1437,7 @@
 perf_stat:
 	return cmd_stat(argc, argv, NULL);
 }
-#endif
+#endif /* HAVE_KVM_STAT_SUPPORT */
 
 static int __cmd_record(const char *file_name, int argc, const char **argv)
 {
@@ -1726,7 +1554,7 @@
 		return cmd_top(argc, argv, NULL);
 	else if (!strncmp(argv[0], "buildid-list", 12))
 		return __cmd_buildid_list(file_name, argc, argv);
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef HAVE_KVM_STAT_SUPPORT
 	else if (!strncmp(argv[0], "stat", 4))
 		return kvm_cmd_stat(file_name, argc, argv);
 #endif
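register_kvm_events_ops() now walks a NULL-terminated kvm_reg_events_ops[] table instead of an if/else strcmp chain, so an architecture adds a report type by adding a table entry. A standalone sketch of that lookup, assuming the table shape implied by the loop (the real struct, with its begin/end/decode hooks, lives in util/kvm-stat.h and the per-arch code):

	#include <stdio.h>
	#include <string.h>

	struct kvm_events_ops {
		const char *name;	/* the real struct also carries the
					 * is_begin/is_end/decode_key hooks */
	};

	struct kvm_reg_events_ops {
		const char *name;	/* what --event=<name> matches */
		struct kvm_events_ops *ops;
	};

	static struct kvm_events_ops exit_events = { .name = "VM-EXIT" };

	/* per-arch, NULL-terminated, defined next to the tracepoints */
	static struct kvm_reg_events_ops kvm_reg_events_ops[] = {
		{ .name = "vmexit", .ops = &exit_events },
		{ .name = NULL, .ops = NULL },
	};

	static struct kvm_events_ops *lookup_events_ops(const char *report_event)
	{
		struct kvm_reg_events_ops *r;

		for (r = kvm_reg_events_ops; r->name; r++)
			if (!strcmp(r->name, report_event))
				return r->ops;

		return NULL;	/* register_kvm_events_ops() returns false */
	}

	int main(void)
	{
		struct kvm_events_ops *ops = lookup_events_ops("vmexit");

		printf("%s\n", ops ? ops->name : "unknown report event");

		return 0;
	}

The same table-driven idea shows up again in kvm_events_tp and kvm_skip_events, which the record and live paths now iterate instead of hard-coding the four x86 tracepoints and the "HLT" special case.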
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 378b85b..4869050 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -238,6 +238,7 @@
 
 static int record__mmap_read_all(struct record *rec)
 {
+	u64 bytes_written = rec->bytes_written;
 	int i;
 	int rc = 0;
 
@@ -250,7 +251,11 @@
 		}
 	}
 
-	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
+	/*
+	 * Mark the round finished in case we wrote
+	 * at least one event.
+	 */
+	if (bytes_written != rec->bytes_written)
 		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 
 out:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index c38d06c..f83c08c 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -10,6 +10,7 @@
 #include "util/header.h"
 #include "util/session.h"
 #include "util/tool.h"
+#include "util/cloexec.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
@@ -434,7 +435,8 @@
 	attr.type = PERF_TYPE_SOFTWARE;
 	attr.config = PERF_COUNT_SW_TASK_CLOCK;
 
-	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+	fd = sys_perf_event_open(&attr, 0, -1, -1,
+				 perf_event_open_cloexec_flag());
 
 	if (fd < 0)
 		pr_err("Error: sys_perf_event_open() syscall returned "
@@ -935,8 +937,8 @@
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, 0, prev_pid);
-	sched_in = machine__findnew_thread(machine, 0, next_pid);
+	sched_out = machine__findnew_thread(machine, -1, prev_pid);
+	sched_in = machine__findnew_thread(machine, -1, next_pid);
 
 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
 	if (!out_events) {
@@ -979,7 +981,7 @@
 {
 	const u32 pid	   = perf_evsel__intval(evsel, sample, "pid");
 	const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
-	struct thread *thread = machine__findnew_thread(machine, 0, pid);
+	struct thread *thread = machine__findnew_thread(machine, -1, pid);
 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
 	u64 timestamp = sample->time;
 	int cpu = sample->cpu;
@@ -1012,7 +1014,7 @@
 	struct thread *wakee;
 	u64 timestamp = sample->time;
 
-	wakee = machine__findnew_thread(machine, 0, pid);
+	wakee = machine__findnew_thread(machine, -1, pid);
 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, wakee))
@@ -1072,7 +1074,7 @@
 	if (sched->profile_cpu == -1)
 		return 0;
 
-	migrant = machine__findnew_thread(machine, 0, pid);
+	migrant = machine__findnew_thread(machine, -1, pid);
 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, migrant))
@@ -1290,7 +1292,7 @@
 		return -1;
 	}
 
-	sched_in = machine__findnew_thread(machine, 0, next_pid);
+	sched_in = machine__findnew_thread(machine, -1, next_pid);
 
 	sched->curr_thread[this_cpu] = sched_in;
 
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9e9c91f..f57035b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -358,27 +358,6 @@
 	}
 }
 
-static bool is_bts_event(struct perf_event_attr *attr)
-{
-	return ((attr->type == PERF_TYPE_HARDWARE) &&
-		(attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
-		(attr->sample_period == 1));
-}
-
-static bool sample_addr_correlates_sym(struct perf_event_attr *attr)
-{
-	if ((attr->type == PERF_TYPE_SOFTWARE) &&
-	    ((attr->config == PERF_COUNT_SW_PAGE_FAULTS) ||
-	     (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN) ||
-	     (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)))
-		return true;
-
-	if (is_bts_event(attr))
-		return true;
-
-	return false;
-}
-
 static void print_sample_addr(union perf_event *event,
 			  struct perf_sample *sample,
 			  struct machine *machine,
@@ -386,24 +365,13 @@
 			  struct perf_event_attr *attr)
 {
 	struct addr_location al;
-	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
 	printf("%16" PRIx64, sample->addr);
 
 	if (!sample_addr_correlates_sym(attr))
 		return;
 
-	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-			      sample->addr, &al);
-	if (!al.map)
-		thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
-				      sample->addr, &al);
-
-	al.cpu = sample->cpu;
-	al.sym = NULL;
-
-	if (al.map)
-		al.sym = map__find_symbol(al.map, al.addr, NULL);
+	perf_event__preprocess_sample_addr(event, sample, machine, thread, &al);
 
 	if (PRINT_FIELD(SYM)) {
 		printf(" ");
@@ -427,25 +395,35 @@
 			     struct addr_location *al)
 {
 	struct perf_event_attr *attr = &evsel->attr;
+	bool print_srcline_last = false;
 
 	/* print branch_from information */
 	if (PRINT_FIELD(IP)) {
-		if (!symbol_conf.use_callchain)
-			printf(" ");
-		else
+		unsigned int print_opts = output[attr->type].print_ip_opts;
+
+		if (symbol_conf.use_callchain && sample->callchain) {
 			printf("\n");
-		perf_evsel__print_ip(evsel, sample, al,
-				     output[attr->type].print_ip_opts,
+		} else {
+			printf(" ");
+			if (print_opts & PRINT_IP_OPT_SRCLINE) {
+				print_srcline_last = true;
+				print_opts &= ~PRINT_IP_OPT_SRCLINE;
+			}
+		}
+		perf_evsel__print_ip(evsel, sample, al, print_opts,
 				     PERF_MAX_STACK_DEPTH);
 	}
 
-	printf(" => ");
-
 	/* print branch_to information */
 	if (PRINT_FIELD(ADDR) ||
 	    ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
-	     !output[attr->type].user_set))
+	     !output[attr->type].user_set)) {
+		printf(" => ");
 		print_sample_addr(event, sample, al->machine, thread, attr);
+	}
+
+	if (print_srcline_last)
+		map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
 
 	printf("\n");
 }
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 65a151e..3e80aa1 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -184,7 +184,7 @@
 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
 	evsel->priv = zalloc(sizeof(struct perf_stat));
-	if (evsel == NULL)
+	if (evsel->priv == NULL)
 		return -ENOMEM;
 	perf_evsel__reset_stat_priv(evsel);
 	return 0;
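The one-line fix above is worth spelling out: the old code tested evsel, which the caller had already guaranteed non-NULL, instead of the fresh allocation, so an OOM would slip through to a NULL dereference. The corrected idiom, with zalloc() approximated by calloc():

	#include <errno.h>
	#include <stdlib.h>

	struct perf_stat { int dummy; };
	struct perf_evsel { void *priv; };

	static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
	{
		/* test the new allocation, not the evsel pointer the
		 * caller already validated */
		evsel->priv = calloc(1, sizeof(struct perf_stat));
		if (evsel->priv == NULL)
			return -ENOMEM;

		return 0;
	}

	int main(void)
	{
		struct perf_evsel evsel = { .priv = NULL };

		return perf_evsel__alloc_stat_priv(&evsel) ? EXIT_FAILURE : 0;
	}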
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 74db256..2f1a522 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -37,6 +37,7 @@
 #include "util/svghelper.h"
 #include "util/tool.h"
 #include "util/data.h"
+#include "util/debug.h"
 
 #define SUPPORT_OLD_POWER_EVENTS 1
 #define PWR_EVENT_EXIT -1
@@ -60,10 +61,17 @@
 				tasks_only,
 				with_backtrace,
 				topology;
+	/* IO related settings */
+	u64			io_events;
+	bool			io_only,
+				skip_eagain;
+	u64			min_time,
+				merge_dist;
 };
 
 struct per_pidcomm;
 struct cpu_sample;
+struct io_sample;
 
 /*
  * Datastructure layout:
@@ -84,6 +92,7 @@
 	u64		start_time;
 	u64		end_time;
 	u64		total_time;
+	u64		total_bytes;
 	int		display;
 
 	struct per_pidcomm *all;
@@ -97,6 +106,8 @@
 	u64		start_time;
 	u64		end_time;
 	u64		total_time;
+	u64		max_bytes;
+	u64		total_bytes;
 
 	int		Y;
 	int		display;
@@ -107,6 +118,7 @@
 	char		*comm;
 
 	struct cpu_sample *samples;
+	struct io_sample  *io_samples;
 };
 
 struct sample_wrapper {
@@ -131,6 +143,27 @@
 	const char *backtrace;
 };
 
+enum {
+	IOTYPE_READ,
+	IOTYPE_WRITE,
+	IOTYPE_SYNC,
+	IOTYPE_TX,
+	IOTYPE_RX,
+	IOTYPE_POLL,
+};
+
+struct io_sample {
+	struct io_sample *next;
+
+	u64 start_time;
+	u64 end_time;
+	u64 bytes;
+	int type;
+	int fd;
+	int err;
+	int merges;
+};
+
 #define CSTATE 1
 #define PSTATE 2
 
@@ -213,7 +246,7 @@
 		pid_set_comm(tchart, pid, pp->current->comm);
 
 	p->start_time = timestamp;
-	if (p->current) {
+	if (p->current && !p->current->start_time) {
 		p->current->start_time = timestamp;
 		p->current->state_since = timestamp;
 	}
@@ -682,6 +715,249 @@
 	}
 }
 
+static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
+			       u64 start, int fd)
+{
+	struct per_pid *p = find_create_pid(tchart, pid);
+	struct per_pidcomm *c = p->current;
+	struct io_sample *sample;
+	struct io_sample *prev;
+
+	if (!c) {
+		c = zalloc(sizeof(*c));
+		if (!c)
+			return -ENOMEM;
+		p->current = c;
+		c->next = p->all;
+		p->all = c;
+	}
+
+	prev = c->io_samples;
+
+	if (prev && prev->start_time && !prev->end_time) {
+		pr_warning("Skip invalid start event: "
+			   "previous event already started!\n");
+
+		/* remove previous event that has been started,
+		 * we are not sure we will ever get an end for it */
+		c->io_samples = prev->next;
+		free(prev);
+		return 0;
+	}
+
+	sample = zalloc(sizeof(*sample));
+	if (!sample)
+		return -ENOMEM;
+	sample->start_time = start;
+	sample->type = type;
+	sample->fd = fd;
+	sample->next = c->io_samples;
+	c->io_samples = sample;
+
+	if (c->start_time == 0 || c->start_time > start)
+		c->start_time = start;
+
+	return 0;
+}
+
+static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
+			     u64 end, long ret)
+{
+	struct per_pid *p = find_create_pid(tchart, pid);
+	struct per_pidcomm *c = p->current;
+	struct io_sample *sample, *prev;
+
+	if (!c) {
+		pr_warning("Invalid pidcomm!\n");
+		return -1;
+	}
+
+	sample = c->io_samples;
+
+	if (!sample) /* skip partially captured events */
+		return 0;
+
+	if (sample->end_time) {
+		pr_warning("Skip invalid end event: "
+			   "previous event already ended!\n");
+		return 0;
+	}
+
+	if (sample->type != type) {
+		pr_warning("Skip invalid end event: invalid event type!\n");
+		return 0;
+	}
+
+	sample->end_time = end;
+	prev = sample->next;
+
+	/* we want to be able to see small and fast transfers, so make them
+	 * at least min_time long, but don't overlap them */
+	if (sample->end_time - sample->start_time < tchart->min_time)
+		sample->end_time = sample->start_time + tchart->min_time;
+	if (prev && sample->start_time < prev->end_time) {
+		if (prev->err) /* try to make errors more visible */
+			sample->start_time = prev->end_time;
+		else
+			prev->end_time = sample->start_time;
+	}
+
+	if (ret < 0) {
+		sample->err = ret;
+	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
+		   type == IOTYPE_TX || type == IOTYPE_RX) {
+
+		if ((u64)ret > c->max_bytes)
+			c->max_bytes = ret;
+
+		c->total_bytes += ret;
+		p->total_bytes += ret;
+		sample->bytes = ret;
+	}
+
+	/* merge two requests to make svg smaller and render-friendly */
+	if (prev &&
+	    prev->type == sample->type &&
+	    prev->err == sample->err &&
+	    prev->fd == sample->fd &&
+	    prev->end_time + tchart->merge_dist >= sample->start_time) {
+
+		sample->bytes += prev->bytes;
+		sample->merges += prev->merges + 1;
+
+		sample->start_time = prev->start_time;
+		sample->next = prev->next;
+		free(prev);
+
+		if (!sample->err && sample->bytes > c->max_bytes)
+			c->max_bytes = sample->bytes;
+	}
+
+	tchart->io_events++;
+
+	return 0;
+}
+
+static int
+process_enter_read(struct timechart *tchart,
+		   struct perf_evsel *evsel,
+		   struct perf_sample *sample)
+{
+	long fd = perf_evsel__intval(evsel, sample, "fd");
+	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
+				   sample->time, fd);
+}
+
+static int
+process_exit_read(struct timechart *tchart,
+		  struct perf_evsel *evsel,
+		  struct perf_sample *sample)
+{
+	long ret = perf_evsel__intval(evsel, sample, "ret");
+	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
+				 sample->time, ret);
+}
+
+static int
+process_enter_write(struct timechart *tchart,
+		    struct perf_evsel *evsel,
+		    struct perf_sample *sample)
+{
+	long fd = perf_evsel__intval(evsel, sample, "fd");
+	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
+				   sample->time, fd);
+}
+
+static int
+process_exit_write(struct timechart *tchart,
+		   struct perf_evsel *evsel,
+		   struct perf_sample *sample)
+{
+	long ret = perf_evsel__intval(evsel, sample, "ret");
+	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
+				 sample->time, ret);
+}
+
+static int
+process_enter_sync(struct timechart *tchart,
+		   struct perf_evsel *evsel,
+		   struct perf_sample *sample)
+{
+	long fd = perf_evsel__intval(evsel, sample, "fd");
+	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
+				   sample->time, fd);
+}
+
+static int
+process_exit_sync(struct timechart *tchart,
+		  struct perf_evsel *evsel,
+		  struct perf_sample *sample)
+{
+	long ret = perf_evsel__intval(evsel, sample, "ret");
+	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
+				 sample->time, ret);
+}
+
+static int
+process_enter_tx(struct timechart *tchart,
+		 struct perf_evsel *evsel,
+		 struct perf_sample *sample)
+{
+	long fd = perf_evsel__intval(evsel, sample, "fd");
+	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
+				   sample->time, fd);
+}
+
+static int
+process_exit_tx(struct timechart *tchart,
+		struct perf_evsel *evsel,
+		struct perf_sample *sample)
+{
+	long ret = perf_evsel__intval(evsel, sample, "ret");
+	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
+				 sample->time, ret);
+}
+
+static int
+process_enter_rx(struct timechart *tchart,
+		 struct perf_evsel *evsel,
+		 struct perf_sample *sample)
+{
+	long fd = perf_evsel__intval(evsel, sample, "fd");
+	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
+				   sample->time, fd);
+}
+
+static int
+process_exit_rx(struct timechart *tchart,
+		struct perf_evsel *evsel,
+		struct perf_sample *sample)
+{
+	long ret = perf_evsel__intval(evsel, sample, "ret");
+	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
+				 sample->time, ret);
+}
+
+static int
+process_enter_poll(struct timechart *tchart,
+		   struct perf_evsel *evsel,
+		   struct perf_sample *sample)
+{
+	long fd = perf_evsel__intval(evsel, sample, "fd");
+	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
+				   sample->time, fd);
+}
+
+static int
+process_exit_poll(struct timechart *tchart,
+		  struct perf_evsel *evsel,
+		  struct perf_sample *sample)
+{
+	long ret = perf_evsel__intval(evsel, sample, "ret");
+	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
+				 sample->time, ret);
+}
+
 /*
  * Sort the pid datastructure
  */
@@ -852,6 +1128,121 @@
 	}
 }
 
+static void draw_io_bars(struct timechart *tchart)
+{
+	const char *suf;
+	double bytes;
+	char comm[256];
+	struct per_pid *p;
+	struct per_pidcomm *c;
+	struct io_sample *sample;
+	int Y = 1;
+
+	p = tchart->all_data;
+	while (p) {
+		c = p->all;
+		while (c) {
+			if (!c->display) {
+				c->Y = 0;
+				c = c->next;
+				continue;
+			}
+
+			svg_box(Y, c->start_time, c->end_time, "process3");
+			sample = c->io_samples;
+			for (sample = c->io_samples; sample; sample = sample->next) {
+				double h = (double)sample->bytes / c->max_bytes;
+
+				if (tchart->skip_eagain &&
+				    sample->err == -EAGAIN)
+					continue;
+
+				if (sample->err)
+					h = 1;
+
+				if (sample->type == IOTYPE_SYNC)
+					svg_fbox(Y,
+						sample->start_time,
+						sample->end_time,
+						1,
+						sample->err ? "error" : "sync",
+						sample->fd,
+						sample->err,
+						sample->merges);
+				else if (sample->type == IOTYPE_POLL)
+					svg_fbox(Y,
+						sample->start_time,
+						sample->end_time,
+						1,
+						sample->err ? "error" : "poll",
+						sample->fd,
+						sample->err,
+						sample->merges);
+				else if (sample->type == IOTYPE_READ)
+					svg_ubox(Y,
+						sample->start_time,
+						sample->end_time,
+						h,
+						sample->err ? "error" : "disk",
+						sample->fd,
+						sample->err,
+						sample->merges);
+				else if (sample->type == IOTYPE_WRITE)
+					svg_lbox(Y,
+						sample->start_time,
+						sample->end_time,
+						h,
+						sample->err ? "error" : "disk",
+						sample->fd,
+						sample->err,
+						sample->merges);
+				else if (sample->type == IOTYPE_RX)
+					svg_ubox(Y,
+						sample->start_time,
+						sample->end_time,
+						h,
+						sample->err ? "error" : "net",
+						sample->fd,
+						sample->err,
+						sample->merges);
+				else if (sample->type == IOTYPE_TX)
+					svg_lbox(Y,
+						sample->start_time,
+						sample->end_time,
+						h,
+						sample->err ? "error" : "net",
+						sample->fd,
+						sample->err,
+						sample->merges);
+			}
+
+			suf = "";
+			bytes = c->total_bytes;
+			if (bytes > 1024) {
+				bytes = bytes / 1024;
+				suf = "K";
+			}
+			if (bytes > 1024) {
+				bytes = bytes / 1024;
+				suf = "M";
+			}
+			if (bytes > 1024) {
+				bytes = bytes / 1024;
+				suf = "G";
+			}
+
+
+			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
+			svg_text(Y, c->start_time, comm);
+
+			c->Y = Y;
+			Y++;
+			c = c->next;
+		}
+		p = p->next;
+	}
+}
+
 static void draw_process_bars(struct timechart *tchart)
 {
 	struct per_pid *p;
@@ -987,9 +1378,6 @@
 	struct per_pidcomm *c;
 	int count = 0;
 
-	if (process_filter)
-		return determine_display_tasks_filtered(tchart);
-
 	p = tchart->all_data;
 	while (p) {
 		p->display = 0;
@@ -1025,15 +1413,46 @@
 	return count;
 }
 
+static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
+{
+	struct per_pid *p;
+	struct per_pidcomm *c;
+	int count = 0;
 
+	p = timechart->all_data;
+	while (p) {
+		/* no exit marker, task kept running to the end */
+		if (p->end_time == 0)
+			p->end_time = timechart->last_time;
 
+		c = p->all;
+
+		while (c) {
+			c->display = 0;
+
+			if (c->total_bytes >= threshold) {
+				c->display = 1;
+				count++;
+			}
+
+			if (c->end_time == 0)
+				c->end_time = timechart->last_time;
+
+			c = c->next;
+		}
+		p = p->next;
+	}
+	return count;
+}
+
+#define BYTES_THRESH (1 * 1024 * 1024)
 #define TIME_THRESH 10000000
 
 static void write_svg_file(struct timechart *tchart, const char *filename)
 {
 	u64 i;
 	int count;
-	int thresh = TIME_THRESH;
+	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
 
 	if (tchart->power_only)
 		tchart->proc_num = 0;
@@ -1041,28 +1460,43 @@
 	/* We'd like to show at least proc_num tasks;
 	 * be less picky if we have fewer */
 	do {
-		count = determine_display_tasks(tchart, thresh);
+		if (process_filter)
+			count = determine_display_tasks_filtered(tchart);
+		else if (tchart->io_events)
+			count = determine_display_io_tasks(tchart, thresh);
+		else
+			count = determine_display_tasks(tchart, thresh);
 		thresh /= 10;
 	} while (!process_filter && thresh && count < tchart->proc_num);
 
 	if (!tchart->proc_num)
 		count = 0;
 
-	open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
+	if (tchart->io_events) {
+		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
 
-	svg_time_grid();
-	svg_legenda();
+		svg_time_grid(0.5);
+		svg_io_legenda();
 
-	for (i = 0; i < tchart->numcpus; i++)
-		svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
+		draw_io_bars(tchart);
+	} else {
+		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
 
-	draw_cpu_usage(tchart);
-	if (tchart->proc_num)
-		draw_process_bars(tchart);
-	if (!tchart->tasks_only)
-		draw_c_p_states(tchart);
-	if (tchart->proc_num)
-		draw_wakeups(tchart);
+		svg_time_grid(0);
+
+		svg_legenda();
+
+		for (i = 0; i < tchart->numcpus; i++)
+			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
+
+		draw_cpu_usage(tchart);
+		if (tchart->proc_num)
+			draw_process_bars(tchart);
+		if (!tchart->tasks_only)
+			draw_c_p_states(tchart);
+		if (tchart->proc_num)
+			draw_wakeups(tchart);
+	}
 
 	svg_close();
 }
@@ -1110,6 +1544,56 @@
 		{ "power:power_end",		process_sample_power_end },
 		{ "power:power_frequency",	process_sample_power_frequency },
 #endif
+
+		{ "syscalls:sys_enter_read",		process_enter_read },
+		{ "syscalls:sys_enter_pread64",		process_enter_read },
+		{ "syscalls:sys_enter_readv",		process_enter_read },
+		{ "syscalls:sys_enter_preadv",		process_enter_read },
+		{ "syscalls:sys_enter_write",		process_enter_write },
+		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
+		{ "syscalls:sys_enter_writev",		process_enter_write },
+		{ "syscalls:sys_enter_pwritev",		process_enter_write },
+		{ "syscalls:sys_enter_sync",		process_enter_sync },
+		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
+		{ "syscalls:sys_enter_fsync",		process_enter_sync },
+		{ "syscalls:sys_enter_msync",		process_enter_sync },
+		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
+		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
+		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
+		{ "syscalls:sys_enter_sendto",		process_enter_tx },
+		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
+		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
+		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
+		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
+		{ "syscalls:sys_enter_poll",		process_enter_poll },
+		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
+		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
+		{ "syscalls:sys_enter_select",		process_enter_poll },
+
+		{ "syscalls:sys_exit_read",		process_exit_read },
+		{ "syscalls:sys_exit_pread64",		process_exit_read },
+		{ "syscalls:sys_exit_readv",		process_exit_read },
+		{ "syscalls:sys_exit_preadv",		process_exit_read },
+		{ "syscalls:sys_exit_write",		process_exit_write },
+		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
+		{ "syscalls:sys_exit_writev",		process_exit_write },
+		{ "syscalls:sys_exit_pwritev",		process_exit_write },
+		{ "syscalls:sys_exit_sync",		process_exit_sync },
+		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
+		{ "syscalls:sys_exit_fsync",		process_exit_sync },
+		{ "syscalls:sys_exit_msync",		process_exit_sync },
+		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
+		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
+		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
+		{ "syscalls:sys_exit_sendto",		process_exit_tx },
+		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
+		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
+		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
+		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
+		{ "syscalls:sys_exit_poll",		process_exit_poll },
+		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
+		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
+		{ "syscalls:sys_exit_select",		process_exit_poll },
 	};
 	struct perf_data_file file = {
 		.path = input_name,
@@ -1154,6 +1638,139 @@
 	return ret;
 }
 
+static int timechart__io_record(int argc, const char **argv)
+{
+	unsigned int rec_argc, i;
+	const char **rec_argv;
+	const char **p;
+	char *filter = NULL;
+
+	const char * const common_args[] = {
+		"record", "-a", "-R", "-c", "1",
+	};
+	unsigned int common_args_nr = ARRAY_SIZE(common_args);
+
+	const char * const disk_events[] = {
+		"syscalls:sys_enter_read",
+		"syscalls:sys_enter_pread64",
+		"syscalls:sys_enter_readv",
+		"syscalls:sys_enter_preadv",
+		"syscalls:sys_enter_write",
+		"syscalls:sys_enter_pwrite64",
+		"syscalls:sys_enter_writev",
+		"syscalls:sys_enter_pwritev",
+		"syscalls:sys_enter_sync",
+		"syscalls:sys_enter_sync_file_range",
+		"syscalls:sys_enter_fsync",
+		"syscalls:sys_enter_msync",
+
+		"syscalls:sys_exit_read",
+		"syscalls:sys_exit_pread64",
+		"syscalls:sys_exit_readv",
+		"syscalls:sys_exit_preadv",
+		"syscalls:sys_exit_write",
+		"syscalls:sys_exit_pwrite64",
+		"syscalls:sys_exit_writev",
+		"syscalls:sys_exit_pwritev",
+		"syscalls:sys_exit_sync",
+		"syscalls:sys_exit_sync_file_range",
+		"syscalls:sys_exit_fsync",
+		"syscalls:sys_exit_msync",
+	};
+	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
+
+	const char * const net_events[] = {
+		"syscalls:sys_enter_recvfrom",
+		"syscalls:sys_enter_recvmmsg",
+		"syscalls:sys_enter_recvmsg",
+		"syscalls:sys_enter_sendto",
+		"syscalls:sys_enter_sendmsg",
+		"syscalls:sys_enter_sendmmsg",
+
+		"syscalls:sys_exit_recvfrom",
+		"syscalls:sys_exit_recvmmsg",
+		"syscalls:sys_exit_recvmsg",
+		"syscalls:sys_exit_sendto",
+		"syscalls:sys_exit_sendmsg",
+		"syscalls:sys_exit_sendmmsg",
+	};
+	unsigned int net_events_nr = ARRAY_SIZE(net_events);
+
+	const char * const poll_events[] = {
+		"syscalls:sys_enter_epoll_pwait",
+		"syscalls:sys_enter_epoll_wait",
+		"syscalls:sys_enter_poll",
+		"syscalls:sys_enter_ppoll",
+		"syscalls:sys_enter_pselect6",
+		"syscalls:sys_enter_select",
+
+		"syscalls:sys_exit_epoll_pwait",
+		"syscalls:sys_exit_epoll_wait",
+		"syscalls:sys_exit_poll",
+		"syscalls:sys_exit_ppoll",
+		"syscalls:sys_exit_pselect6",
+		"syscalls:sys_exit_select",
+	};
+	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
+
+	rec_argc = common_args_nr +
+		disk_events_nr * 4 +
+		net_events_nr * 4 +
+		poll_events_nr * 4 +
+		argc;
+	rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+	if (rec_argv == NULL)
+		return -ENOMEM;
+
+	if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
+		return -ENOMEM;
+
+	p = rec_argv;
+	for (i = 0; i < common_args_nr; i++)
+		*p++ = strdup(common_args[i]);
+
+	for (i = 0; i < disk_events_nr; i++) {
+		if (!is_valid_tracepoint(disk_events[i])) {
+			rec_argc -= 4;
+			continue;
+		}
+
+		*p++ = "-e";
+		*p++ = strdup(disk_events[i]);
+		*p++ = "--filter";
+		*p++ = filter;
+	}
+	for (i = 0; i < net_events_nr; i++) {
+		if (!is_valid_tracepoint(net_events[i])) {
+			rec_argc -= 4;
+			continue;
+		}
+
+		*p++ = "-e";
+		*p++ = strdup(net_events[i]);
+		*p++ = "--filter";
+		*p++ = filter;
+	}
+	for (i = 0; i < poll_events_nr; i++) {
+		if (!is_valid_tracepoint(poll_events[i])) {
+			rec_argc -= 4;
+			continue;
+		}
+
+		*p++ = "-e";
+		*p++ = strdup(poll_events[i]);
+		*p++ = "--filter";
+		*p++ = filter;
+	}
+
+	for (i = 0; i < (unsigned int)argc; i++)
+		*p++ = argv[i];
+
+	return cmd_record(rec_argc, rec_argv, NULL);
+}
+
+
 static int timechart__record(struct timechart *tchart, int argc, const char **argv)
 {
 	unsigned int rec_argc, i, j;
@@ -1270,6 +1887,30 @@
 	return 0;
 }
 
+static int
+parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
+{
+	char unit = 'n';
+	u64 *value = opt->value;
+
+	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
+		switch (unit) {
+		case 'm':
+			*value *= 1000000;
+			break;
+		case 'u':
+			*value *= 1000;
+			break;
+		case 'n':
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
 int cmd_timechart(int argc, const char **argv,
 		  const char *prefix __maybe_unused)
 {
@@ -1282,6 +1923,8 @@
 			.ordered_samples = true,
 		},
 		.proc_num = 15,
+		.min_time = 1000000,
+		.merge_dist = 1000,
 	};
 	const char *output_name = "output.svg";
 	const struct option timechart_options[] = {
@@ -1303,6 +1946,14 @@
 		    "min. number of tasks to print"),
 	OPT_BOOLEAN('t', "topology", &tchart.topology,
 		    "sort CPUs according to topology"),
+	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
+		    "skip EAGAIN errors"),
+	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
+		     "IO that finishes faster than min-time is drawn as min-time long, so it stays visible",
+		     parse_time),
+	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
+		     "merge events that are merge-dist us apart",
+		     parse_time),
 	OPT_END()
 	};
 	const char * const timechart_usage[] = {
@@ -1314,6 +1965,8 @@
 	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
 	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
 		    "output processes data only"),
+	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
+		    "record only IO data"),
 	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
 	OPT_END()
 	};
@@ -1340,7 +1993,10 @@
 			return -1;
 		}
 
-		return timechart__record(&tchart, argc, argv);
+		if (tchart.io_only)
+			return timechart__io_record(argc, argv);
+		else
+			return timechart__record(&tchart, argc, argv);
 	} else if (argc)
 		usage_with_options(timechart_usage, timechart_options);
 
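Both --io-min-time and --io-merge-dist feed through the parse_time() callback added above, which accepts an n/u/m unit suffix before the trailing 's' and normalizes everything to nanoseconds. A self-contained version of that suffix handling (simplified: unlike the option callback, this one treats a missing number as an error instead of keeping the built-in default):

	#include <inttypes.h>
	#include <stdio.h>

	static int parse_time(const char *arg, uint64_t *value)
	{
		char unit = 'n';

		if (sscanf(arg, "%" SCNu64 "%cs", value, &unit) < 1)
			return -1;

		switch (unit) {
		case 'm':			/* ms -> ns */
			*value *= 1000000;
			break;
		case 'u':			/* us -> ns */
			*value *= 1000;
			break;
		case 'n':			/* already ns */
			break;
		default:
			return -1;
		}

		return 0;
	}

	int main(void)
	{
		uint64_t ns;

		if (!parse_time("10ms", &ns))
			printf("%" PRIu64 " ns\n", ns);	/* 10000000 ns */

		return 0;
	}

So `--io-min-time 100us` and `--io-merge-dist 1ms` both land in the u64 nanosecond fields that pid_end_io_sample() compares against.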
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f954c26..a6c3752 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1108,6 +1108,7 @@
 	struct event_format *tp_format;
 	const char	    *name;
 	bool		    filtered;
+	bool		    is_exit;
 	struct syscall_fmt  *fmt;
 	size_t		    (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
 	void		    **arg_parm;
@@ -1132,6 +1133,7 @@
 	u64		  exit_time;
 	bool		  entry_pending;
 	unsigned long	  nr_events;
+	unsigned long	  pfmaj, pfmin;
 	char		  *entry_str;
 	double		  runtime_ms;
 	struct {
@@ -1177,6 +1179,9 @@
 	return NULL;
 }
 
+#define TRACE_PFMAJ		(1 << 0)
+#define TRACE_PFMIN		(1 << 1)
+
 struct trace {
 	struct perf_tool	tool;
 	struct {
@@ -1211,6 +1216,8 @@
 	bool			summary_only;
 	bool			show_comm;
 	bool			show_tool_stats;
+	bool			trace_syscalls;
+	int			trace_pgfaults;
 };
 
 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
@@ -1276,11 +1283,11 @@
 	if (fd < 0)
 		return NULL;
 
-	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL))
+	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
 		if (!trace->live)
 			return NULL;
 		++trace->stats.proc_getname;
-		if (thread__read_fd_path(thread, fd)) {
+		if (thread__read_fd_path(thread, fd))
 			return NULL;
 	}
 
@@ -1473,6 +1480,8 @@
 	if (sc->tp_format == NULL)
 		return -1;
 
+	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
+
 	return syscall__set_arg_fmts(sc);
 }
 
@@ -1535,6 +1544,7 @@
 }
 
 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
+				  union perf_event *event,
 				  struct perf_sample *sample);
 
 static struct syscall *trace__syscall_info(struct trace *trace,
@@ -1607,6 +1617,7 @@
 }
 
 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
+			    union perf_event *event __maybe_unused,
 			    struct perf_sample *sample)
 {
 	char *msg;
@@ -1629,7 +1640,6 @@
 		return -1;
 
 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
-	ttrace = thread->priv;
 
 	if (ttrace->entry_str == NULL) {
 		ttrace->entry_str = malloc(1024);
@@ -1644,7 +1654,7 @@
 	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed,
 					   args, trace, thread);
 
-	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
+	if (sc->is_exit) {
 		if (!trace->duration_filter && !trace->summary_only) {
 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
 			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
@@ -1656,6 +1666,7 @@
 }
 
 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
+			   union perf_event *event __maybe_unused,
 			   struct perf_sample *sample)
 {
 	int ret;
@@ -1687,8 +1698,6 @@
 		++trace->stats.vfs_getname;
 	}
 
-	ttrace = thread->priv;
-
 	ttrace->exit_time = sample->time;
 
 	if (ttrace->entry_time) {
@@ -1735,6 +1744,7 @@
 }
 
 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
+			      union perf_event *event __maybe_unused,
 			      struct perf_sample *sample)
 {
 	trace->last_vfs_getname = perf_evsel__rawptr(evsel, sample, "pathname");
@@ -1742,6 +1752,7 @@
 }
 
 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
+				     union perf_event *event __maybe_unused,
 				     struct perf_sample *sample)
 {
         u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
@@ -1768,6 +1779,80 @@
 	return 0;
 }
 
+static void print_location(FILE *f, struct perf_sample *sample,
+			   struct addr_location *al,
+			   bool print_dso, bool print_sym)
+{
+	if ((verbose || print_dso) && al->map)
+		fprintf(f, "%s@", al->map->dso->long_name);
+
+	if ((verbose || print_sym) && al->sym)
+		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
+			al->addr - al->sym->start);
+	else if (al->map)
+		fprintf(f, "0x%" PRIx64, al->addr);
+	else
+		fprintf(f, "0x%" PRIx64, sample->addr);
+}
+
+static int trace__pgfault(struct trace *trace,
+			  struct perf_evsel *evsel,
+			  union perf_event *event,
+			  struct perf_sample *sample)
+{
+	struct thread *thread;
+	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+	struct addr_location al;
+	char map_type = 'd';
+	struct thread_trace *ttrace;
+
+	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+	ttrace = thread__trace(thread, trace->output);
+	if (ttrace == NULL)
+		return -1;
+
+	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
+		ttrace->pfmaj++;
+	else
+		ttrace->pfmin++;
+
+	if (trace->summary_only)
+		return 0;
+
+	thread__find_addr_location(thread, trace->host, cpumode, MAP__FUNCTION,
+			      sample->ip, &al);
+
+	trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
+
+	fprintf(trace->output, "%sfault [",
+		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
+		"maj" : "min");
+
+	print_location(trace->output, sample, &al, false, true);
+
+	fprintf(trace->output, "] => ");
+
+	thread__find_addr_location(thread, trace->host, cpumode, MAP__VARIABLE,
+				   sample->addr, &al);
+
+	if (!al.map) {
+		thread__find_addr_location(thread, trace->host, cpumode,
+					   MAP__FUNCTION, sample->addr, &al);
+
+		if (al.map)
+			map_type = 'x';
+		else
+			map_type = '?';
+	}
+
+	print_location(trace->output, sample, &al, true, false);
+
+	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
+
+	return 0;
+}
+
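
trace__pgfault() classifies each sample as a major or minor fault from
evsel->attr.config, bumps the per-thread counters the summary prints later,
and resolves two addresses: the faulting instruction (sample->ip via
MAP__FUNCTION) and the touched data (sample->addr via MAP__VARIABLE, falling
back to 'x' for executable maps and '?' when nothing matches). The events
feeding it are plain software counters; a minimal sketch of opening one
directly, assuming only the perf_event_open(2) syscall:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Open a per-task software minor-fault counter, one sample per
	 * fault, with data-address tracking, as the patch does in
	 * perf_evlist__add_pgfault() below. */
	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_PAGE_FAULTS_MIN;
		attr.sample_period = 1;	/* sample every fault */
		attr.mmap_data = 1;	/* resolve data addresses */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			perror("perf_event_open");
		else
			close(fd);
		return fd < 0 ? 1 : 0;
	}
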
 static bool skip_sample(struct trace *trace, struct perf_sample *sample)
 {
 	if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
@@ -1781,7 +1866,7 @@
 }
 
 static int trace__process_sample(struct perf_tool *tool,
-				 union perf_event *event __maybe_unused,
+				 union perf_event *event,
 				 struct perf_sample *sample,
 				 struct perf_evsel *evsel,
 				 struct machine *machine __maybe_unused)
@@ -1799,7 +1884,7 @@
 
 	if (handler) {
 		++trace->nr_events;
-		handler(trace, evsel, sample);
+		handler(trace, evsel, event, sample);
 	}
 
 	return err;
@@ -1826,7 +1911,7 @@
 	return 0;
 }
 
-static int trace__record(int argc, const char **argv)
+static int trace__record(struct trace *trace, int argc, const char **argv)
 {
 	unsigned int rec_argc, i, j;
 	const char **rec_argv;
@@ -1835,34 +1920,54 @@
 		"-R",
 		"-m", "1024",
 		"-c", "1",
-		"-e",
 	};
 
+	const char * const sc_args[] = { "-e", };
+	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
+	const char * const majpf_args[] = { "-e", "major-faults" };
+	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
+	const char * const minpf_args[] = { "-e", "minor-faults" };
+	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
+
 	/* +1 is for the event string below */
-	rec_argc = ARRAY_SIZE(record_args) + 1 + argc;
+	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
+		majpf_args_nr + minpf_args_nr + argc;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
 	if (rec_argv == NULL)
 		return -ENOMEM;
 
+	j = 0;
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
-		rec_argv[i] = record_args[i];
+		rec_argv[j++] = record_args[i];
 
-	/* event string may be different for older kernels - e.g., RHEL6 */
-	if (is_valid_tracepoint("raw_syscalls:sys_enter"))
-		rec_argv[i] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
-	else if (is_valid_tracepoint("syscalls:sys_enter"))
-		rec_argv[i] = "syscalls:sys_enter,syscalls:sys_exit";
-	else {
-		pr_err("Neither raw_syscalls nor syscalls events exist.\n");
-		return -1;
+	if (trace->trace_syscalls) {
+		for (i = 0; i < sc_args_nr; i++)
+			rec_argv[j++] = sc_args[i];
+
+		/* event string may be different for older kernels - e.g., RHEL6 */
+		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
+			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
+		else if (is_valid_tracepoint("syscalls:sys_enter"))
+			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
+		else {
+			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
+			return -1;
+		}
 	}
-	i++;
 
-	for (j = 0; j < (unsigned int)argc; j++, i++)
-		rec_argv[i] = argv[j];
+	if (trace->trace_pgfaults & TRACE_PFMAJ)
+		for (i = 0; i < majpf_args_nr; i++)
+			rec_argv[j++] = majpf_args[i];
 
-	return cmd_record(i, rec_argv, NULL);
+	if (trace->trace_pgfaults & TRACE_PFMIN)
+		for (i = 0; i < minpf_args_nr; i++)
+			rec_argv[j++] = minpf_args[i];
+
+	for (i = 0; i < (unsigned int)argc; i++)
+		rec_argv[j++] = argv[i];
+
+	return cmd_record(j, rec_argv, NULL);
 }
 
 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
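
trace__record() now assembles the perf-record argument vector in optional
groups behind a single write index j: the fixed arguments, then
"-e raw_syscalls:..." only when syscalls are being traced, then
"-e major-faults" and/or "-e minor-faults" per the TRACE_PF* mask, then the
user's command. A compressed sketch of the pattern, with hypothetical names:

	#include <stdio.h>
	#include <stdlib.h>

	static const char *base[] = { "record", "-R", "-m", "1024", "-c", "1" };

	/* Build argv: fixed args, optional "-e <event>" groups selected
	 * by a mask, then the target command.  Caller frees. */
	static const char **build_argv(int mask, int argc, const char **argv,
				       unsigned int *nr)
	{
		const char **vec = calloc(6 + 4 + argc + 1, sizeof(*vec));
		unsigned int i, j = 0;

		if (vec == NULL)
			return NULL;
		for (i = 0; i < 6; i++)
			vec[j++] = base[i];
		if (mask & 1) { vec[j++] = "-e"; vec[j++] = "major-faults"; }
		if (mask & 2) { vec[j++] = "-e"; vec[j++] = "minor-faults"; }
		for (i = 0; i < (unsigned int)argc; i++)
			vec[j++] = argv[i];
		*nr = j;
		return vec;
	}

	int main(void)
	{
		const char *cmd[] = { "sleep", "1" };
		unsigned int i, nr = 0;
		const char **v = build_argv(3, 2, cmd, &nr);

		for (i = 0; v && i < nr; i++)
			printf("%s ", v[i]);
		printf("\n");
		free((void *)v);
		return 0;
	}
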
@@ -1882,6 +1987,30 @@
 	perf_evlist__add(evlist, evsel);
 }
 
+static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
+				    u64 config)
+{
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr = {
+		.type = PERF_TYPE_SOFTWARE,
+		.mmap_data = 1,
+	};
+
+	attr.config = config;
+	attr.sample_period = 1;
+
+	event_attr_init(&attr);
+
+	evsel = perf_evsel__new(&attr);
+	if (!evsel)
+		return -ENOMEM;
+
+	evsel->handler = trace__pgfault;
+	perf_evlist__add(evlist, evsel);
+
+	return 0;
+}
+
 static int trace__run(struct trace *trace, int argc, const char **argv)
 {
 	struct perf_evlist *evlist = perf_evlist__new();
@@ -1897,10 +2026,21 @@
 		goto out;
 	}
 
-	if (perf_evlist__add_syscall_newtp(evlist, trace__sys_enter, trace__sys_exit))
+	if (trace->trace_syscalls &&
+	    perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
+					   trace__sys_exit))
 		goto out_error_tp;
 
-	perf_evlist__add_vfs_getname(evlist);
+	if (trace->trace_syscalls)
+		perf_evlist__add_vfs_getname(evlist);
+
+	if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
+	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ))
+		goto out_error_tp;
+
+	if ((trace->trace_pgfaults & TRACE_PFMIN) &&
+	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
+		goto out_error_tp;
 
 	if (trace->sched &&
 		perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
@@ -1982,7 +2122,8 @@
 				goto next_event;
 			}
 
-			if (sample.raw_data == NULL) {
+			if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+			    sample.raw_data == NULL) {
 				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
 				       perf_evsel__name(evsel), sample.tid,
 				       sample.cpu, sample.raw_size);
@@ -1990,7 +2131,7 @@
 			}
 
 			handler = evsel->handler;
-			handler(trace, evsel, &sample);
+			handler(trace, evsel, event, &sample);
 next_event:
 			perf_evlist__mmap_consume(evlist, i);
 
@@ -2093,13 +2234,10 @@
 	if (evsel == NULL)
 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
 							     "syscalls:sys_enter");
-	if (evsel == NULL) {
-		pr_err("Data file does not have raw_syscalls:sys_enter event\n");
-		goto out;
-	}
 
-	if (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
-	    perf_evsel__init_sc_tp_ptr_field(evsel, args)) {
+	if (evsel &&
+	    (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
+	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
 		pr_err("Error initializing raw_syscalls:sys_enter event\n");
 		goto out;
 	}
@@ -2109,15 +2247,19 @@
 	if (evsel == NULL)
 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
 							     "syscalls:sys_exit");
-	if (evsel == NULL) {
-		pr_err("Data file does not have raw_syscalls:sys_exit event\n");
+	if (evsel &&
+	    (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
+	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
+		pr_err("Error initializing raw_syscalls:sys_exit event\n");
 		goto out;
 	}
 
-	if (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
-	    perf_evsel__init_sc_tp_uint_field(evsel, ret)) {
-		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
-		goto out;
+	evlist__for_each(session->evlist, evsel) {
+		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
+		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
+		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
+		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
+			evsel->handler = trace__pgfault;
 	}
 
 	err = parse_target_str(trace);
@@ -2217,6 +2359,10 @@
 	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
 	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
 	printed += fprintf(fp, "%.1f%%", ratio);
+	if (ttrace->pfmaj)
+		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
+	if (ttrace->pfmin)
+		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
 	printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
 	printed += thread__dump_stats(ttrace, trace, fp);
 
@@ -2264,6 +2410,23 @@
 	return trace->output == NULL ? -errno : 0;
 }
 
+static int parse_pagefaults(const struct option *opt, const char *str,
+			    int unset __maybe_unused)
+{
+	int *trace_pgfaults = opt->value;
+
+	if (strcmp(str, "all") == 0)
+		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
+	else if (strcmp(str, "maj") == 0)
+		*trace_pgfaults |= TRACE_PFMAJ;
+	else if (strcmp(str, "min") == 0)
+		*trace_pgfaults |= TRACE_PFMIN;
+	else
+		return -1;
+
+	return 0;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 {
 	const char * const trace_usage[] = {
@@ -2293,6 +2456,7 @@
 		},
 		.output = stdout,
 		.show_comm = true,
+		.trace_syscalls = true,
 	};
 	const char *output_name = NULL;
 	const char *ev_qualifier_str = NULL;
@@ -2330,20 +2494,34 @@
 		    "Show only syscall summary with statistics"),
 	OPT_BOOLEAN('S', "with-summary", &trace.summary,
 		    "Show all syscalls and summary with statistics"),
+	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
+		     "Trace pagefaults", parse_pagefaults, "maj"),
+	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
 	OPT_END()
 	};
 	int err;
 	char bf[BUFSIZ];
 
-	if ((argc > 1) && (strcmp(argv[1], "record") == 0))
-		return trace__record(argc-2, &argv[2]);
+	argc = parse_options(argc, argv, trace_options, trace_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
 
-	argc = parse_options(argc, argv, trace_options, trace_usage, 0);
+	if (trace.trace_pgfaults) {
+		trace.opts.sample_address = true;
+		trace.opts.sample_time = true;
+	}
+
+	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
+		return trace__record(&trace, argc-1, &argv[1]);
 
 	/* summary_only implies summary option, but don't overwrite summary if set */
 	if (trace.summary_only)
 		trace.summary = trace.summary_only;
 
+	if (!trace.trace_syscalls && !trace.trace_pgfaults) {
+		pr_err("Please specify something to trace.\n");
+		return -1;
+	}
+
 	if (output_name != NULL) {
 		err = trace__open_output(&trace, output_name);
 		if (err < 0) {
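
cmd_trace() now runs parse_options() before special-casing the "record"
subcommand (PARSE_OPT_STOP_AT_NON_OPTION leaves the workload argv
untouched), turns any --pf request into sample_address/sample_time in the
record opts, and bails out when --no-syscalls is given without --pf, since
there would be nothing to trace. parse_pagefaults() itself is a small
string-to-bitmask mapper; a stand-alone sketch with hypothetical names:

	#include <stdio.h>
	#include <string.h>

	#define TRACE_PFMAJ	(1 << 0)
	#define TRACE_PFMIN	(1 << 1)

	/* Mirrors parse_pagefaults(): "all"/"maj"/"min" to a mask,
	 * anything else is an error. */
	static int parse_pf(const char *str, int *mask)
	{
		if (strcmp(str, "all") == 0)
			*mask |= TRACE_PFMAJ | TRACE_PFMIN;
		else if (strcmp(str, "maj") == 0)
			*mask |= TRACE_PFMAJ;
		else if (strcmp(str, "min") == 0)
			*mask |= TRACE_PFMIN;
		else
			return -1;
		return 0;
	}

	int main(void)
	{
		int mask = 0;

		parse_pf("maj", &mask);	/* the OPT_CALLBACK_DEFAULT default */
		printf("mask=%d\n", mask);	/* 1 */
		return 0;
	}
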
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index f30ac5e..1f67aa0 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -48,6 +48,10 @@
   NO_LIBDW_DWARF_UNWIND := 1
 endif
 
+ifeq ($(ARCH),powerpc)
+  CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
+endif
+
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 else
@@ -160,6 +164,7 @@
 	backtrace			\
 	dwarf				\
 	fortify-source			\
+	sync-compare-and-swap		\
 	glibc				\
 	gtk2				\
 	gtk2-infobar			\
@@ -195,6 +200,7 @@
 VF_FEATURE_TESTS =			\
 	backtrace			\
 	fortify-source			\
+	sync-compare-and-swap		\
 	gtk2-infobar			\
 	libelf-getphdrnum		\
 	libelf-mmap			\
@@ -268,6 +274,10 @@
 
 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 
+ifeq ($(feature-sync-compare-and-swap), 1)
+  CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
+endif
+
 ifndef NO_BIONIC
   $(call feature_check,bionic)
   ifeq ($(feature-bionic), 1)
@@ -590,6 +600,10 @@
   endif
 endif
 
+ifdef HAVE_KVM_STAT_SUPPORT
+    CFLAGS += -DHAVE_KVM_STAT_SUPPORT
+endif
+
 # Among the variables below, these:
 #   perfexecdir
 #   template_dir
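
The Makefile changes wire up two new compile-time switches:
HAVE_SKIP_CALLCHAIN_IDX is forced on for powerpc (consumed by the
callchain.h hunk further down), and HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT is
defined only when the feature probe added below compiles. Guarded code then
follows the usual pattern; a hypothetical consumer sketch (the fallback path
and names are illustrative, not from the patch):

	#include <stdint.h>

	static volatile uint64_t refcnt;

	/* Use the GCC __sync builtins when the probe succeeded, fall
	 * back to a plain (non-atomic) update otherwise. */
	static void get_ref(void)
	{
	#ifdef HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
		uint64_t old;

		do {
			old = refcnt;
		} while (!__sync_bool_compare_and_swap(&refcnt, old, old + 1));
	#else
		refcnt++;
	#endif
	}

	int main(void)
	{
		get_ref();
		return (int)refcnt - 1;	/* 0 on success */
	}
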
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 64c84e5..6088f8d 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -5,6 +5,7 @@
 	test-bionic.bin			\
 	test-dwarf.bin			\
 	test-fortify-source.bin		\
+	test-sync-compare-and-swap.bin	\
 	test-glibc.bin			\
 	test-gtk2.bin			\
 	test-gtk2-infobar.bin		\
@@ -141,6 +142,9 @@
 test-libdw-dwarf-unwind.bin:
 	$(BUILD)
 
+test-sync-compare-and-swap.bin:
+	$(BUILD) -Werror
+
 -include *.d
 
 ###############################
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index fe5c1e5..a7d022e 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -89,6 +89,10 @@
 # include "test-libdw-dwarf-unwind.c"
 #undef main
 
+#define main main_test_sync_compare_and_swap
+# include "test-sync-compare-and-swap.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
 	main_test_libpython();
@@ -111,6 +115,7 @@
 	main_test_timerfd();
 	main_test_stackprotector_all();
 	main_test_libdw_dwarf_unwind();
+	main_test_sync_compare_and_swap(argc, argv);
 
 	return 0;
 }
diff --git a/tools/perf/config/feature-checks/test-sync-compare-and-swap.c b/tools/perf/config/feature-checks/test-sync-compare-and-swap.c
new file mode 100644
index 0000000..c34d4ca
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-sync-compare-and-swap.c
@@ -0,0 +1,14 @@
+#include <stdint.h>
+
+volatile uint64_t x;
+
+int main(int argc, char *argv[])
+{
+	uint64_t old, new = argc;
+
+	argv = argv;
+	do {
+		old = __sync_val_compare_and_swap(&x, 0, 0);
+	} while (!__sync_bool_compare_and_swap(&x, old, new));
+	return old == new;
+}
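
On targets without 64-bit __sync primitives the reference above fails to
link, and the -Werror build (see the feature-checks Makefile hunk) turns any
warning into a failure too; the odd "argv = argv" self-assignment is
presumably there to keep an unused-parameter warning from tripping it. For
reference, the semantics the probe is checking for:

	#include <assert.h>
	#include <stdint.h>

	/* __sync_val_compare_and_swap() returns the value found in
	 * memory; the swap took place iff that equals the expected
	 * old value. */
	int main(void)
	{
		uint64_t x = 0;
		uint64_t seen = __sync_val_compare_and_swap(&x, 0, 42);

		assert(seen == 0 && x == 42);	/* swapped */
		seen = __sync_val_compare_and_swap(&x, 0, 7);
		assert(seen == 42 && x == 42);	/* not swapped */
		return 0;
	}
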
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 5268a14..937e432 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -54,6 +54,7 @@
 #define mb()		asm volatile("bcr 15,0" ::: "memory")
 #define wmb()		asm volatile("bcr 15,0" ::: "memory")
 #define rmb()		asm volatile("bcr 15,0" ::: "memory")
+#define CPUINFO_PROC	"vendor_id"
 #endif
 
 #ifdef __sh__
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 95c58fc..2282d41 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -13,11 +13,12 @@
 #include "util/quote.h"
 #include "util/run-command.h"
 #include "util/parse-events.h"
+#include "util/debug.h"
 #include <api/fs/debugfs.h>
 #include <pthread.h>
 
 const char perf_usage_string[] =
-	"perf [--version] [--help] COMMAND [ARGS]";
+	"perf [--version] [--help] [OPTIONS] COMMAND [ARGS]";
 
 const char perf_more_info_string[] =
 	"See 'perf help COMMAND' for more information on a specific command.";
@@ -212,6 +213,16 @@
 				printf("%s ", p->cmd);
 			}
 			exit(0);
+		} else if (!strcmp(cmd, "--debug")) {
+			if (*argc < 2) {
+				fprintf(stderr, "No variable specified for --debug.\n");
+				usage(perf_usage_string);
+			}
+			if (perf_debug_option((*argv)[1]))
+				usage(perf_usage_string);
+
+			(*argv)++;
+			(*argc)--;
 		} else {
 			fprintf(stderr, "Unknown option: %s\n", cmd);
 			usage(perf_usage_string);
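
handle_options() now accepts a global "--debug <variable>" before any
subcommand and shifts it out of argv, with the actual parsing done by
perf_debug_option() from the newly included util/debug.h, so a debug knob
can be set once for whatever command follows. The consume-and-shift pattern,
as a stand-alone sketch (a hypothetical stand-in for handle_options()):

	#include <stdio.h>
	#include <string.h>

	/* Eat "--flag value" pairs until the first non-option, which is
	 * treated as the subcommand. */
	int main(int argc, char **argv)
	{
		argv++; argc--;	/* skip program name */
		while (argc > 0 && argv[0][0] == '-') {
			if (strcmp(argv[0], "--debug") == 0) {
				if (argc < 2) {
					fprintf(stderr, "No variable specified for --debug.\n");
					return 1;
				}
				printf("debug knob: %s\n", argv[1]);
				argv++; argc--;	/* consume the value */
			}
			argv++; argc--;		/* consume the option */
		}
		if (argc > 0)
			printf("subcommand: %s\n", argv[0]);
		return 0;
	}
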
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index 8104895..74685f3 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_exit $@
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl
index 94bc25a..55e7ae4 100644
--- a/tools/perf/scripts/perl/failed-syscalls.pl
+++ b/tools/perf/scripts/perl/failed-syscalls.pl
@@ -26,6 +26,11 @@
 	}
 }
 
+sub syscalls::sys_exit
+{
+	raw_syscalls::sys_exit(@_)
+}
+
 sub trace_end
 {
     printf("\nfailed syscalls by comm:\n\n");
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
index de7211e..38dfb72 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -107,12 +107,13 @@
 
 class EventHeaders:
 	def __init__(self, common_cpu, common_secs, common_nsecs,
-		     common_pid, common_comm):
+		     common_pid, common_comm, common_callchain):
 		self.cpu = common_cpu
 		self.secs = common_secs
 		self.nsecs = common_nsecs
 		self.pid = common_pid
 		self.comm = common_comm
+		self.callchain = common_callchain
 
 	def ts(self):
 		return (self.secs * (10 ** 9)) + self.nsecs
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index 8104895..74685f3 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_exit $@
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
index 4efbfaa..d694084 100644
--- a/tools/perf/scripts/python/bin/sctop-record
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_enter $@
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 4efbfaa..d694084 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_enter $@
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 4efbfaa..d694084 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_enter $@
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 4647a76..334599c 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -27,7 +27,7 @@
 
 def irq__softirq_entry(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	vec):
+	common_callchain, vec):
 		print_header(event_name, common_cpu, common_secs, common_nsecs,
 			common_pid, common_comm)
 
@@ -38,7 +38,7 @@
 
 def kmem__kmalloc(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	call_site, ptr, bytes_req, bytes_alloc,
+	common_callchain, call_site, ptr, bytes_req, bytes_alloc,
 	gfp_flags):
 		print_header(event_name, common_cpu, common_secs, common_nsecs,
 			common_pid, common_comm)
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 85805fa..cafeff3 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -39,7 +39,7 @@
 
 def raw_syscalls__sys_exit(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	id, ret):
+	common_callchain, id, ret):
 	if (for_comm and common_comm != for_comm) or \
 	   (for_pid  and common_pid  != for_pid ):
 		return
@@ -50,6 +50,11 @@
 		except TypeError:
 			syscalls[common_comm][common_pid][id][ret] = 1
 
+def syscalls__sys_exit(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, ret):
+	raw_syscalls__sys_exit(**locals())
+
 def print_error_totals():
     if for_comm is not None:
 	    print "\nsyscall errors for %s:\n\n" % (for_comm),
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 11e70a3..0f5cf43 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -21,7 +21,7 @@
 lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
 process_names = {} # long-lived pid-to-execname mapping
 
-def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
 			      nr, uaddr, op, val, utime, uaddr2, val3):
 	cmd = op & FUTEX_CMD_MASK
 	if cmd != FUTEX_WAIT:
@@ -31,7 +31,7 @@
 	thread_thislock[tid] = uaddr
 	thread_blocktime[tid] = nsecs(s, ns)
 
-def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
 			     nr, ret):
 	if thread_blocktime.has_key(tid):
 		elapsed = nsecs(s, ns) - thread_blocktime[tid]
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index b574059..0b6ce8c 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -66,7 +66,7 @@
 	print_drop_table()
 
 # called from perf, when it finds a corresponding event
-def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
 		   skbaddr, location, protocol):
 	slocation = str(location)
 	try:
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 9aa0a32..4d21ef2 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -224,75 +224,75 @@
 			(len(rx_skb_list), of_count_rx_skb_list)
 
 # called from perf, when it finds a corresponding event
-def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
 	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
 		return
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
 	all_event_list.append(event_info)
 
-def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
 	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
 		return
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
 	all_event_list.append(event_info)
 
-def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
 	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
 		return
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
 	all_event_list.append(event_info)
 
 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
-			irq, irq_name):
+			callchain, irq, irq_name):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			irq, irq_name)
 	all_event_list.append(event_info)
 
-def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
 	all_event_list.append(event_info)
 
-def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			napi, dev_name)
 	all_event_list.append(event_info)
 
-def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
 			skblen, dev_name):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr, skblen, dev_name)
 	all_event_list.append(event_info)
 
-def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
 			skblen, dev_name):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr, skblen, dev_name)
 	all_event_list.append(event_info)
 
-def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
 			skbaddr, skblen, dev_name):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr, skblen, dev_name)
 	all_event_list.append(event_info)
 
-def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
 			skbaddr, skblen, rc, dev_name):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr, skblen, rc, dev_name)
 	all_event_list.append(event_info)
 
-def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
 			skbaddr, protocol, location):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr, protocol, location)
 	all_event_list.append(event_info)
 
-def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr)
 	all_event_list.append(event_info)
 
-def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
 	skbaddr, skblen):
 	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
 			skbaddr, skblen)
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 74d55ec..de66cb3 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -369,93 +369,92 @@
 
 def sched__sched_stat_runtime(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, runtime, vruntime):
+	common_callchain, comm, pid, runtime, vruntime):
 	pass
 
 def sched__sched_stat_iowait(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, delay):
+	common_callchain, comm, pid, delay):
 	pass
 
 def sched__sched_stat_sleep(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, delay):
+	common_callchain, comm, pid, delay):
 	pass
 
 def sched__sched_stat_wait(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, delay):
+	common_callchain, comm, pid, delay):
 	pass
 
 def sched__sched_process_fork(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	parent_comm, parent_pid, child_comm, child_pid):
+	common_callchain, parent_comm, parent_pid, child_comm, child_pid):
 	pass
 
 def sched__sched_process_wait(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio):
+	common_callchain, comm, pid, prio):
 	pass
 
 def sched__sched_process_exit(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio):
+	common_callchain, comm, pid, prio):
 	pass
 
 def sched__sched_process_free(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio):
+	common_callchain, comm, pid, prio):
 	pass
 
 def sched__sched_migrate_task(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio, orig_cpu,
+	common_callchain, comm, pid, prio, orig_cpu,
 	dest_cpu):
 	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-				common_pid, common_comm)
+				common_pid, common_comm, common_callchain)
 	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
 
 def sched__sched_switch(event_name, context, common_cpu,
-	common_secs, common_nsecs, common_pid, common_comm,
+	common_secs, common_nsecs, common_pid, common_comm, common_callchain,
 	prev_comm, prev_pid, prev_prio, prev_state,
 	next_comm, next_pid, next_prio):
 
 	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-				common_pid, common_comm)
+				common_pid, common_comm, common_callchain)
 	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
 			 next_comm, next_pid, next_prio)
 
 def sched__sched_wakeup_new(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio, success,
+	common_callchain, comm, pid, prio, success,
 	target_cpu):
 	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-				common_pid, common_comm)
+				common_pid, common_comm, common_callchain)
 	parser.wake_up(headers, comm, pid, success, target_cpu, 1)
 
 def sched__sched_wakeup(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio, success,
+	common_callchain, comm, pid, prio, success,
 	target_cpu):
 	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-				common_pid, common_comm)
+				common_pid, common_comm, common_callchain)
 	parser.wake_up(headers, comm, pid, success, target_cpu, 0)
 
 def sched__sched_wait_task(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid, prio):
+	common_callchain, comm, pid, prio):
 	pass
 
 def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	ret):
+	common_callchain, ret):
 	pass
 
 def sched__sched_kthread_stop(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	comm, pid):
+	common_callchain, comm, pid):
 	pass
 
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
-		common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
 	pass
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 42c267e..61621b9 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -44,7 +44,7 @@
 
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	id, args):
+	common_callchain, id, args):
 	if for_comm is not None:
 		if common_comm != for_comm:
 			return
@@ -53,6 +53,11 @@
 	except TypeError:
 		syscalls[id] = 1
 
+def syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, args):
+	raw_syscalls__sys_enter(**locals())
+
 def print_syscall_totals(interval):
 	while 1:
 		clear_term()
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index c64d1c5..daf314c 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -38,7 +38,7 @@
 
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	id, args):
+	common_callchain, id, args):
 
 	if (for_comm and common_comm != for_comm) or \
 	   (for_pid  and common_pid  != for_pid ):
@@ -48,6 +48,11 @@
 	except TypeError:
 		syscalls[common_comm][common_pid][id] = 1
 
+def syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, args):
+	raw_syscalls__sys_enter(**locals())
+
 def print_syscall_totals():
     if for_comm is not None:
 	    print "\nsyscall events for %s:\n\n" % (for_comm),
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index b435d3f..e66a773 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -35,7 +35,7 @@
 
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
 	common_secs, common_nsecs, common_pid, common_comm,
-	id, args):
+	common_callchain, id, args):
 	if for_comm is not None:
 		if common_comm != for_comm:
 			return
@@ -44,6 +44,11 @@
 	except TypeError:
 		syscalls[id] = 1
 
+def syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, args):
+	raw_syscalls__sys_enter(**locals())
+
 def print_syscall_totals():
     if for_comm is not None:
 	    print "\nsyscall events for %s:\n\n" % (for_comm),
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index e9bd639..f710b92 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -1,7 +1,8 @@
 [event]
 fd=1
 group_fd=-1
-flags=0
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
 cpu=*
 type=0|1
 size=96
diff --git a/tools/perf/tests/attr/base-stat b/tools/perf/tests/attr/base-stat
index 91cd48b..dc3ada2 100644
--- a/tools/perf/tests/attr/base-stat
+++ b/tools/perf/tests/attr/base-stat
@@ -1,7 +1,8 @@
 [event]
 fd=1
 group_fd=-1
-flags=0
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
 cpu=*
 type=0
 size=96
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index aba0954..a02b035 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -25,6 +25,7 @@
 #include "tests.h"
 #include "debug.h"
 #include "perf.h"
+#include "cloexec.h"
 
 static int fd1;
 static int fd2;
@@ -78,7 +79,8 @@
 	pe.exclude_kernel = 1;
 	pe.exclude_hv = 1;
 
-	fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
+	fd = sys_perf_event_open(&pe, 0, -1, -1,
+				 perf_event_open_cloexec_flag());
 	if (fd < 0) {
 		pr_debug("failed opening event %llx\n", pe.config);
 		return TEST_FAIL;
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index 44ac821..e765377 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -24,6 +24,7 @@
 #include "tests.h"
 #include "debug.h"
 #include "perf.h"
+#include "cloexec.h"
 
 static int overflows;
 
@@ -91,7 +92,8 @@
 	pe.exclude_kernel = 1;
 	pe.exclude_hv = 1;
 
-	fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
+	fd = sys_perf_event_open(&pe, 0, -1, -1,
+				 perf_event_open_cloexec_flag());
 	if (fd < 0) {
 		pr_debug("failed opening event %llx\n", pe.config);
 		return TEST_FAIL;
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index 630808c..caaf37f 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -10,6 +10,7 @@
 #include "machine.h"
 #include "symbol.h"
 #include "tests.h"
+#include "debug.h"
 
 static char *test_file(int size)
 {
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 465cdbc..b8d8341 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -2,6 +2,7 @@
 #include "evsel.h"
 #include "parse-events.h"
 #include "tests.h"
+#include "debug.h"
 
 static int perf_evsel__roundtrip_cache_name_test(void)
 {
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 35d7fdb..5216242 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -1,6 +1,7 @@
 #include <traceevent/event-parse.h>
 #include "evsel.h"
 #include "tests.h"
+#include "debug.h"
 
 static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
 				  int size, bool should_be_signed)
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index c505ef2..0785b64 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -3,6 +3,7 @@
 #include "evsel.h"
 #include "thread_map.h"
 #include "tests.h"
+#include "debug.h"
 
 int test__syscall_open_tp_fields(void)
 {
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index deba669..5941927 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -5,6 +5,7 @@
 #include <api/fs/fs.h>
 #include <api/fs/debugfs.h>
 #include "tests.h"
+#include "debug.h"
 #include <linux/hw_breakpoint.h>
 
 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
index 905019f..2c63ea6 100644
--- a/tools/perf/tests/parse-no-sample-id-all.c
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -7,6 +7,7 @@
 #include "evlist.h"
 #include "header.h"
 #include "util.h"
+#include "debug.h"
 
 static int process_event(struct perf_evlist **pevlist, union perf_event *event)
 {
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 3b7cd4d..f238442 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -8,10 +8,9 @@
 #include "evsel.h"
 #include "thread_map.h"
 #include "cpumap.h"
+#include "tsc.h"
 #include "tests.h"
 
-#include "../arch/x86/util/tsc.h"
-
 #define CHECK__(x) {				\
 	while ((x) < 0) {			\
 		pr_debug(#x " failed!\n");	\
@@ -26,15 +25,6 @@
 	}					\
 }
 
-static u64 rdtsc(void)
-{
-	unsigned int low, high;
-
-	asm volatile("rdtsc" : "=a" (low), "=d" (high));
-
-	return low | ((u64)high) << 32;
-}
-
 /**
  * test__perf_time_to_tsc - test converting perf time to TSC.
  *
diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/tests/rdpmc.c
index e59143f..c04d1f2 100644
--- a/tools/perf/tests/rdpmc.c
+++ b/tools/perf/tests/rdpmc.c
@@ -6,6 +6,7 @@
 #include "perf.h"
 #include "debug.h"
 #include "tests.h"
+#include "cloexec.h"
 
 #if defined(__x86_64__) || defined(__i386__)
 
@@ -104,7 +105,8 @@
 	sa.sa_sigaction = segfault_handler;
 	sigaction(SIGSEGV, &sa, NULL);
 
-	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+	fd = sys_perf_event_open(&attr, 0, -1, -1,
+				 perf_event_open_cloexec_flag());
 	if (fd < 0) {
 		pr_err("Error: sys_perf_event_open() syscall returned "
 		       "with %d (%s)\n", fd, strerror(errno));
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 7ae8d17..ca292f9 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -4,6 +4,7 @@
 #include "util.h"
 #include "event.h"
 #include "evsel.h"
+#include "debug.h"
 
 #include "tests.h"
 
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c
index 2b2e0db..b028499 100644
--- a/tools/perf/tests/thread-mg-share.c
+++ b/tools/perf/tests/thread-mg-share.c
@@ -2,6 +2,7 @@
 #include "machine.h"
 #include "thread.h"
 #include "map.h"
+#include "debug.h"
 
 int test__thread_mg_share(void)
 {
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 3ccf6e1..6680fa5 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -150,7 +150,7 @@
 	while (nd != NULL) {
 		ui_browser__gotorc(browser, row, 0);
 		browser->write(browser, nd, row);
-		if (++row == browser->height)
+		if (++row == browser->rows)
 			break;
 		nd = rb_next(nd);
 	}
@@ -166,7 +166,7 @@
 void ui_browser__refresh_dimensions(struct ui_browser *browser)
 {
 	browser->width = SLtt_Screen_Cols - 1;
-	browser->height = SLtt_Screen_Rows - 2;
+	browser->height = browser->rows = SLtt_Screen_Rows - 2;
 	browser->y = 1;
 	browser->x = 0;
 }
@@ -250,7 +250,10 @@
 	int err;
 	va_list ap;
 
-	ui_browser__refresh_dimensions(browser);
+	if (browser->refresh_dimensions == NULL)
+		browser->refresh_dimensions = ui_browser__refresh_dimensions;
+
+	browser->refresh_dimensions(browser);
 
 	pthread_mutex_lock(&ui__lock);
 	__ui_browser__show_title(browser, title);
@@ -279,7 +282,7 @@
 {
 	int height = browser->height, h = 0, pct = 0,
 	    col = browser->width,
-	    row = browser->y - 1;
+	    row = 0;
 
 	if (browser->nr_entries > 1) {
 		pct = ((browser->index * (browser->height - 1)) /
@@ -367,7 +370,7 @@
 
 		if (key == K_RESIZE) {
 			ui__refresh_dimensions(false);
-			ui_browser__refresh_dimensions(browser);
+			browser->refresh_dimensions(browser);
 			__ui_browser__show_title(browser, browser->title);
 			ui_helpline__puts(browser->helpline);
 			continue;
@@ -389,7 +392,7 @@
 			if (browser->index == browser->nr_entries - 1)
 				break;
 			++browser->index;
-			if (browser->index == browser->top_idx + browser->height) {
+			if (browser->index == browser->top_idx + browser->rows) {
 				++browser->top_idx;
 				browser->seek(browser, +1, SEEK_CUR);
 			}
@@ -405,10 +408,10 @@
 			break;
 		case K_PGDN:
 		case ' ':
-			if (browser->top_idx + browser->height > browser->nr_entries - 1)
+			if (browser->top_idx + browser->rows > browser->nr_entries - 1)
 				break;
 
-			offset = browser->height;
+			offset = browser->rows;
 			if (browser->index + offset > browser->nr_entries - 1)
 				offset = browser->nr_entries - 1 - browser->index;
 			browser->index += offset;
@@ -419,10 +422,10 @@
 			if (browser->top_idx == 0)
 				break;
 
-			if (browser->top_idx < browser->height)
+			if (browser->top_idx < browser->rows)
 				offset = browser->top_idx;
 			else
-				offset = browser->height;
+				offset = browser->rows;
 
 			browser->index -= offset;
 			browser->top_idx -= offset;
@@ -432,7 +435,7 @@
 			ui_browser__reset_index(browser);
 			break;
 		case K_END:
-			offset = browser->height - 1;
+			offset = browser->rows - 1;
 			if (offset >= browser->nr_entries)
 				offset = browser->nr_entries - 1;
 
@@ -462,7 +465,7 @@
 		if (!browser->filter || !browser->filter(browser, pos)) {
 			ui_browser__gotorc(browser, row, 0);
 			browser->write(browser, pos, row);
-			if (++row == browser->height)
+			if (++row == browser->rows)
 				break;
 		}
 	}
@@ -587,7 +590,7 @@
 		if (!browser->filter || !browser->filter(browser, *pos)) {
 			ui_browser__gotorc(browser, row, 0);
 			browser->write(browser, pos, row);
-			if (++row == browser->height)
+			if (++row == browser->rows)
 				break;
 		}
 
@@ -623,7 +626,7 @@
 
 	SLsmg_set_char_set(1);
 
-	if (start < browser->top_idx + browser->height) {
+	if (start < browser->top_idx + browser->rows) {
 		row = start - browser->top_idx;
 		ui_browser__gotorc(browser, row, column);
 		SLsmg_write_char(SLSMG_LLCORN_CHAR);
@@ -633,7 +636,7 @@
 		if (row-- == 0)
 			goto out;
 	} else
-		row = browser->height - 1;
+		row = browser->rows - 1;
 
 	if (end > browser->top_idx)
 		end_row = end - browser->top_idx;
@@ -675,8 +678,8 @@
 	} else
 		row = 0;
 
-	if (end >= browser->top_idx + browser->height)
-		end_row = browser->height - 1;
+	if (end >= browser->top_idx + browser->rows)
+		end_row = browser->rows - 1;
 	else
 		end_row = end - browser->top_idx;
 
@@ -684,7 +687,7 @@
 	SLsmg_draw_vline(end_row - row + 1);
 
 	ui_browser__gotorc(browser, end_row, column);
-	if (end < browser->top_idx + browser->height) {
+	if (end < browser->top_idx + browser->rows) {
 		SLsmg_write_char(SLSMG_LLCORN_CHAR);
 		ui_browser__gotorc(browser, end_row, column + 1);
 		SLsmg_write_char(SLSMG_HLINE_CHAR);
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 03d4d62..92ae721 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -14,11 +14,12 @@
 struct ui_browser {
 	u64	      index, top_idx;
 	void	      *top, *entries;
-	u16	      y, x, width, height;
+	u16	      y, x, width, height, rows;
 	int	      current_color;
 	void	      *priv;
 	const char    *title;
 	char	      *helpline;
+	void	      (*refresh_dimensions)(struct ui_browser *browser);
 	unsigned int  (*refresh)(struct ui_browser *browser);
 	void	      (*write)(struct ui_browser *browser, void *entry, int row);
 	void	      (*seek)(struct ui_browser *browser, off_t offset, int whence);
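
struct ui_browser now distinguishes height (the whole drawable area) from
rows (what entry rendering may use), plus an optional refresh_dimensions()
hook that ui_browser__show() fills in with the generic
ui_browser__refresh_dimensions() when left NULL; that lets the hists browser
reserve a line for column headers without every other browser caring. The
default-when-NULL virtual method pattern, reduced to a sketch with
hypothetical names:

	#include <stdio.h>

	struct browser {
		unsigned short height, rows;
		void (*refresh_dimensions)(struct browser *b);
	};

	/* Generic case: nothing reserved, all lines usable. */
	static void default_dims(struct browser *b)
	{
		b->rows = b->height;
	}

	/* Override: keep one line for a header. */
	static void header_dims(struct browser *b)
	{
		b->rows = b->height - 1;
	}

	static void browser_show(struct browser *b)
	{
		if (b->refresh_dimensions == NULL)
			b->refresh_dimensions = default_dims;
		b->refresh_dimensions(b);
	}

	int main(void)
	{
		struct browser b = { .height = 24 };

		browser_show(&b);
		printf("rows=%d\n", b.rows);	/* 24 */
		b.refresh_dimensions = header_dims;
		browser_show(&b);
		printf("rows=%d\n", b.rows);	/* 23 */
		return 0;
	}
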
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 04a229a..a94b11fc 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -26,6 +26,7 @@
 	struct map_symbol   *selection;
 	int		     print_seq;
 	bool		     show_dso;
+	bool		     show_headers;
 	float		     min_pcnt;
 	u64		     nr_non_filtered_entries;
 	u64		     nr_callchain_rows;
@@ -33,8 +34,7 @@
 
 extern void hist_browser__init_hpp(void);
 
-static int hists__browser_title(struct hists *hists, char *bf, size_t size,
-				const char *ev_name);
+static int hists__browser_title(struct hists *hists, char *bf, size_t size);
 static void hist_browser__update_nr_entries(struct hist_browser *hb);
 
 static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -57,11 +57,42 @@
 	return nr_entries + hb->nr_callchain_rows;
 }
 
-static void hist_browser__refresh_dimensions(struct hist_browser *browser)
+static void hist_browser__update_rows(struct hist_browser *hb)
 {
+	struct ui_browser *browser = &hb->b;
+	u16 header_offset = hb->show_headers ? 1 : 0, index_row;
+
+	browser->rows = browser->height - header_offset;
+	/*
+	 * Verify if we were at the last line and that line isn't
+	 * visible because we now show the header line(s).
+	 */
+	index_row = browser->index - browser->top_idx;
+	if (index_row >= browser->rows)
+		browser->index -= index_row - browser->rows + 1;
+}
+
+static void hist_browser__refresh_dimensions(struct ui_browser *browser)
+{
+	struct hist_browser *hb = container_of(browser, struct hist_browser, b);
+
 	/* 3 == +/- toggle symbol before actual hist_entry rendering */
-	browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
-			     sizeof("[k]"));
+	browser->width = 3 + (hists__sort_list_width(hb->hists) + sizeof("[k]"));
+	/*
+	 * FIXME: Just keeping existing behaviour, but this really should be
+	 *	  before updating browser->width, as it will invalidate the
+	 *	  calculation above. Fix this and the fallout in another
+	 *	  changeset.
+	 */
+	ui_browser__refresh_dimensions(browser);
+	hist_browser__update_rows(hb);
+}
+
+static void hist_browser__gotorc(struct hist_browser *browser, int row, int column)
+{
+	u16 header_offset = browser->show_headers ? 1 : 0;
+
+	ui_browser__gotorc(&browser->b, row + header_offset, column);
 }
 
 static void hist_browser__reset(struct hist_browser *browser)
@@ -74,7 +105,7 @@
 
 	hist_browser__update_nr_entries(browser);
 	browser->b.nr_entries = hist_browser__nr_entries(browser);
-	hist_browser__refresh_dimensions(browser);
+	hist_browser__refresh_dimensions(&browser->b);
 	ui_browser__reset_index(&browser->b);
 }
 
@@ -346,7 +377,7 @@
 		"Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser,
 			     struct hist_browser_timer *hbt)
 {
 	int key;
@@ -356,8 +387,7 @@
 	browser->b.entries = &browser->hists->entries;
 	browser->b.nr_entries = hist_browser__nr_entries(browser);
 
-	hist_browser__refresh_dimensions(browser);
-	hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+	hists__browser_title(browser->hists, title, sizeof(title));
 
 	if (ui_browser__show(&browser->b, title,
 			     "Press '?' for help on key bindings") < 0)
@@ -384,7 +414,7 @@
 				ui_browser__warn_lost_events(&browser->b);
 			}
 
-			hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+			hists__browser_title(browser->hists, title, sizeof(title));
 			ui_browser__show_title(&browser->b, title);
 			continue;
 		}
@@ -393,10 +423,10 @@
 			struct hist_entry *h = rb_entry(browser->b.top,
 							struct hist_entry, rb_node);
 			ui_helpline__pop();
-			ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
+			ui_helpline__fpush("%d: nr_ent=(%d,%d), rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
 					   seq++, browser->b.nr_entries,
 					   browser->hists->nr_entries,
-					   browser->b.height,
+					   browser->b.rows,
 					   browser->b.index,
 					   browser->b.top_idx,
 					   h->row_offset, h->nr_rows);
@@ -410,6 +440,10 @@
 			/* Expand the whole world. */
 			hist_browser__set_folding(browser, true);
 			break;
+		case 'H':
+			browser->show_headers = !browser->show_headers;
+			hist_browser__update_rows(browser);
+			break;
 		case K_ENTER:
 			if (hist_browser__toggle_fold(browser))
 				break;
@@ -509,13 +543,13 @@
 			}
 
 			ui_browser__set_color(&browser->b, color);
-			ui_browser__gotorc(&browser->b, row, 0);
+			hist_browser__gotorc(browser, row, 0);
 			slsmg_write_nstring(" ", offset + extra_offset);
 			slsmg_printf("%c ", folded_sign);
 			slsmg_write_nstring(str, width);
 			free(alloc_str);
 
-			if (++row == browser->b.height)
+			if (++row == browser->b.rows)
 				goto out;
 do_next:
 			if (folded_sign == '+')
@@ -528,7 +562,7 @@
 									 new_level, row, row_offset,
 									 is_current_entry);
 		}
-		if (row == browser->b.height)
+		if (row == browser->b.rows)
 			goto out;
 		node = next;
 	}
@@ -568,13 +602,13 @@
 
 		s = callchain_list__sym_name(chain, bf, sizeof(bf),
 					     browser->show_dso);
-		ui_browser__gotorc(&browser->b, row, 0);
+		hist_browser__gotorc(browser, row, 0);
 		ui_browser__set_color(&browser->b, color);
 		slsmg_write_nstring(" ", offset);
 		slsmg_printf("%c ", folded_sign);
 		slsmg_write_nstring(s, width - 2);
 
-		if (++row == browser->b.height)
+		if (++row == browser->b.rows)
 			goto out;
 	}
 
@@ -603,7 +637,7 @@
 		row += hist_browser__show_callchain_node(browser, node, level,
 							 row, row_offset,
 							 is_current_entry);
-		if (row == browser->b.height)
+		if (row == browser->b.rows)
 			break;
 	}
 
@@ -733,7 +767,7 @@
 			.ptr		= &arg,
 		};
 
-		ui_browser__gotorc(&browser->b, row, 0);
+		hist_browser__gotorc(browser, row, 0);
 
 		perf_hpp__for_each_format(fmt) {
 			if (perf_hpp__should_skip(fmt))
@@ -777,7 +811,7 @@
 	} else
 		--row_offset;
 
-	if (folded_sign == '-' && row != browser->b.height) {
+	if (folded_sign == '-' && row != browser->b.rows) {
 		printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
 							1, row, &row_offset,
 							&current_entry);
@@ -788,6 +822,56 @@
 	return printed;
 }
 
+static int advance_hpp_check(struct perf_hpp *hpp, int inc)
+{
+	advance_hpp(hpp, inc);
+	return hpp->size <= 0;
+}
+
+static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
+{
+	struct perf_hpp dummy_hpp = {
+		.buf    = buf,
+		.size   = size,
+	};
+	struct perf_hpp_fmt *fmt;
+	size_t ret = 0;
+
+	if (symbol_conf.use_callchain) {
+		ret = scnprintf(buf, size, "  ");
+		if (advance_hpp_check(&dummy_hpp, ret))
+			return ret;
+	}
+
+	perf_hpp__for_each_format(fmt) {
+		if (perf_hpp__should_skip(fmt))
+			continue;
+
+		/* We need to add the length of the column headers. */
+		perf_hpp__reset_width(fmt, hists);
+
+		ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+		if (advance_hpp_check(&dummy_hpp, ret))
+			break;
+
+		ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "  ");
+		if (advance_hpp_check(&dummy_hpp, ret))
+			break;
+	}
+
+	return ret;
+}
+
+static void hist_browser__show_headers(struct hist_browser *browser)
+{
+	char headers[1024];
+
+	hists__scnprintf_headers(headers, sizeof(headers), browser->hists);
+	ui_browser__gotorc(&browser->b, 0, 0);
+	ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
+	slsmg_write_nstring(headers, browser->b.width + 1);
+}
+
 static void ui_browser__hists_init_top(struct ui_browser *browser)
 {
 	if (browser->top == NULL) {
@@ -801,9 +885,15 @@
 static unsigned int hist_browser__refresh(struct ui_browser *browser)
 {
 	unsigned row = 0;
+	u16 header_offset = 0;
 	struct rb_node *nd;
 	struct hist_browser *hb = container_of(browser, struct hist_browser, b);
 
+	if (hb->show_headers) {
+		hist_browser__show_headers(hb);
+		header_offset = 1;
+	}
+
 	ui_browser__hists_init_top(browser);
 
 	for (nd = browser->top; nd; nd = rb_next(nd)) {
@@ -818,11 +908,11 @@
 			continue;
 
 		row += hist_browser__show_entry(hb, h, row);
-		if (row == browser->height)
+		if (row == browser->rows)
 			break;
 	}
 
-	return row;
+	return row + header_offset;
 }
 
 static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -1191,8 +1281,10 @@
 	if (browser) {
 		browser->hists = hists;
 		browser->b.refresh = hist_browser__refresh;
+		browser->b.refresh_dimensions = hist_browser__refresh_dimensions;
 		browser->b.seek = ui_browser__hists_seek;
 		browser->b.use_navkeypressed = true;
+		browser->show_headers = symbol_conf.show_hist_headers;
 	}
 
 	return browser;
@@ -1213,8 +1305,7 @@
 	return browser->he_selection->thread;
 }
 
-static int hists__browser_title(struct hists *hists, char *bf, size_t size,
-				const char *ev_name)
+static int hists__browser_title(struct hists *hists, char *bf, size_t size)
 {
 	char unit;
 	int printed;
@@ -1223,6 +1314,7 @@
 	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
 	u64 nr_events = hists->stats.total_period;
 	struct perf_evsel *evsel = hists_to_evsel(hists);
+	const char *ev_name = perf_evsel__name(evsel);
 	char buf[512];
 	size_t buflen = sizeof(buf);
 
@@ -1390,7 +1482,7 @@
 }
 
 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
-				    const char *helpline, const char *ev_name,
+				    const char *helpline,
 				    bool left_exits,
 				    struct hist_browser_timer *hbt,
 				    float min_pcnt,
@@ -1422,6 +1514,7 @@
 	"d             Zoom into current DSO\n"				\
 	"E             Expand all callchains\n"				\
 	"F             Toggle percentage of filtered entries\n"		\
+	"H             Display column headers\n"			\
 
 	/* help messages are sorted by lexical order of the hotkey */
 	const char report_help[] = HIST_BROWSER_HELP_COMMON
@@ -1465,7 +1558,7 @@
 
 		nr_options = 0;
 
-		key = hist_browser__run(browser, ev_name, hbt);
+		key = hist_browser__run(browser, hbt);
 
 		if (browser->he_selection != NULL) {
 			thread = hist_browser__selected_thread(browser);
@@ -1843,7 +1936,7 @@
 {
 	struct perf_evlist *evlist = menu->b.priv;
 	struct perf_evsel *pos;
-	const char *ev_name, *title = "Available samples";
+	const char *title = "Available samples";
 	int delay_secs = hbt ? hbt->refresh : 0;
 	int key;
 
@@ -1876,9 +1969,8 @@
 			 */
 			if (hbt)
 				hbt->timer(hbt->arg);
-			ev_name = perf_evsel__name(pos);
 			key = perf_evsel__hists_browse(pos, nr_events, help,
-						       ev_name, true, hbt,
+						       true, hbt,
 						       menu->min_pcnt,
 						       menu->env);
 			ui_browser__show_title(&menu->b, title);
@@ -1982,10 +2074,9 @@
 single_entry:
 	if (nr_entries == 1) {
 		struct perf_evsel *first = perf_evlist__first(evlist);
-		const char *ev_name = perf_evsel__name(first);
 
 		return perf_evsel__hists_browse(first, nr_entries, help,
-						ev_name, false, hbt, min_pcnt,
+						false, hbt, min_pcnt,
 						env);
 	}
 
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 90122ab..40af0ac 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -479,7 +479,7 @@
 
 		if (h->ms.map == NULL && verbose > 1) {
 			__map_groups__fprintf_maps(h->thread->mg,
-						   MAP__FUNCTION, verbose, fp);
+						   MAP__FUNCTION, fp);
 			fprintf(fp, "%.10s end\n", graph_dotted_line);
 		}
 	}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 48b6d3f..437ee09 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -626,7 +626,7 @@
 
 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
 {
-	if (!symbol_conf.use_callchain)
+	if (!symbol_conf.use_callchain || sample->callchain == NULL)
 		return 0;
 	return callchain_append(he->callchain, &callchain_cursor, sample->period);
 }
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 8f84423..da43619 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -176,4 +176,17 @@
 	dest->first = src->curr;
 	dest->nr -= src->pos;
 }
+
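+/*
+ * Architectures that need to skip a kernel-saved callchain entry
+ * (determined from DWARF debug info) define HAVE_SKIP_CALLCHAIN_IDX
+ * and provide arch_skip_callchain_idx(); the fallback returns -1,
+ * i.e. no entry is skipped.
+ */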
+#ifdef HAVE_SKIP_CALLCHAIN_IDX
+extern int arch_skip_callchain_idx(struct machine *machine,
+			struct thread *thread, struct ip_callchain *chain);
+#else
+static inline int arch_skip_callchain_idx(struct machine *machine __maybe_unused,
+			struct thread *thread __maybe_unused,
+			struct ip_callchain *chain __maybe_unused)
+{
+	return -1;
+}
+#endif
+
 #endif	/* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
new file mode 100644
index 0000000..c5d05ec
--- /dev/null
+++ b/tools/perf/util/cloexec.c
@@ -0,0 +1,57 @@
+#include "util.h"
+#include "../perf.h"
+#include "cloexec.h"
+#include "asm/bug.h"
+
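+/*
+ * Cached result of the probe: cleared to 0 the first time
+ * perf_event_open_cloexec_flag() finds the flag unsupported.
+ */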
+static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+
+static int perf_flag_probe(void)
+{
+	/* use 'safest' configuration as used in perf_evsel__fallback() */
+	struct perf_event_attr attr = {
+		.type = PERF_TYPE_SOFTWARE,
+		.config = PERF_COUNT_SW_CPU_CLOCK,
+	};
+	int fd;
+	int err;
+
+	/* check cloexec flag */
+	fd = sys_perf_event_open(&attr, 0, -1, -1,
+				 PERF_FLAG_FD_CLOEXEC);
+	err = errno;
+
+	if (fd >= 0) {
+		close(fd);
+		return 1;
+	}
+
+	WARN_ONCE(err != EINVAL,
+		  "perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
+		  err, strerror(err));
+
+	/* not supported; confirm that the error relates to PERF_FLAG_FD_CLOEXEC */
+	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+	err = errno;
+
+	if (WARN_ONCE(fd < 0,
+		      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
+		      err, strerror(err)))
+		return -1;
+
+	close(fd);
+
+	return 0;
+}
+
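+/*
+ * Return PERF_FLAG_FD_CLOEXEC if the running kernel supports it,
+ * 0 otherwise; the probe itself runs only once per process.
+ */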
+unsigned long perf_event_open_cloexec_flag(void)
+{
+	static bool probed;
+
+	if (!probed) {
+		if (perf_flag_probe() <= 0)
+			flag = 0;
+		probed = true;
+	}
+
+	return flag;
+}
diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
new file mode 100644
index 0000000..94a5a7d
--- /dev/null
+++ b/tools/perf/util/cloexec.h
@@ -0,0 +1,6 @@
+#ifndef __PERF_CLOEXEC_H
+#define __PERF_CLOEXEC_H
+
+unsigned long perf_event_open_cloexec_flag(void);
+
+#endif /* __PERF_CLOEXEC_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 24519e1..1e5e2e5 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -350,6 +350,16 @@
 	return 0;
 }
 
+static int perf_ui_config(const char *var, const char *value)
+{
+	/* Add other config variables here. */
+	if (!strcmp(var, "ui.show-headers")) {
+		symbol_conf.show_hist_headers = perf_config_bool(var, value);
+		return 0;
+	}
+	return 0;
+}
+
 int perf_default_config(const char *var, const char *value,
 			void *dummy __maybe_unused)
 {
@@ -359,6 +369,9 @@
 	if (!prefixcmp(var, "hist."))
 		return perf_hist_config(var, value);
 
+	if (!prefixcmp(var, "ui."))
+		return perf_ui_config(var, value);
+
 	/* Add other config variables here. */
 	return 0;
 }
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 55de44e..29d720c 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -7,6 +7,7 @@
 
 #include "data.h"
 #include "util.h"
+#include "debug.h"
 
 static bool check_pipe(struct perf_data_file *file)
 {
@@ -65,7 +66,7 @@
 		goto out_close;
 
 	if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
-		pr_err("file %s not owned by current user or root\n",
+		pr_err("File %s not owned by current user or root (use -f to override)\n",
 		       file->path);
 		goto out_close;
 	}
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 299b555..71d4193 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -16,11 +16,11 @@
 int verbose;
 bool dump_trace = false, quiet = false;
 
-static int _eprintf(int level, const char *fmt, va_list args)
+static int _eprintf(int level, int var, const char *fmt, va_list args)
 {
 	int ret = 0;
 
-	if (verbose >= level) {
+	if (var >= level) {
 		if (use_browser >= 1)
 			ui_helpline__vshow(fmt, args);
 		else
@@ -30,13 +30,13 @@
 	return ret;
 }
 
-int eprintf(int level, const char *fmt, ...)
+int eprintf(int level, int var, const char *fmt, ...)
 {
 	va_list args;
 	int ret;
 
 	va_start(args, fmt);
-	ret = _eprintf(level, fmt, args);
+	ret = _eprintf(level, var, fmt, args);
 	va_end(args);
 
 	return ret;
@@ -51,9 +51,9 @@
 	va_list args;
 
 	va_start(args, fmt);
-	_eprintf(1, fmt, args);
+	_eprintf(1, verbose, fmt, args);
 	va_end(args);
-	eprintf(1, "\n");
+	eprintf(1, verbose, "\n");
 }
 
 int dump_printf(const char *fmt, ...)
@@ -105,3 +105,47 @@
 	}
 	printf(".\n");
 }
+
+static struct debug_variable {
+	const char *name;
+	int *ptr;
+} debug_variables[] = {
+	{ .name = "verbose", .ptr = &verbose },
+	{ .name = NULL, }
+};
+
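+/*
+ * Parse a "name" or "name=value" option string and set the matching
+ * entry in debug_variables[]; a bare name defaults the value to 1.
+ */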
+int perf_debug_option(const char *str)
+{
+	struct debug_variable *var = &debug_variables[0];
+	char *vstr, *s = strdup(str);
+	int v = 1;
+
+	vstr = strchr(s, '=');
+	if (vstr)
+		*vstr++ = 0;
+
+	while (var->name) {
+		if (!strcmp(s, var->name))
+			break;
+		var++;
+	}
+
+	if (!var->name) {
+		pr_err("Unknown debug variable name '%s'\n", s);
+		free(s);
+		return -1;
+	}
+
+	if (vstr) {
+		v = atoi(vstr);
+		/*
+		 * Allow only values in the range [0, 10];
+		 * anything outside it is reset to 0.
+		 */
+		v = (v < 0) || (v > 10) ? 0 : v;
+	}
+
+	*var->ptr = v;
+	free(s);
+	return 0;
+}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 443694c..89fb6b0 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -11,6 +11,24 @@
 extern int verbose;
 extern bool quiet, dump_trace;
 
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+	eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+	eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(union perf_event *event);
 
@@ -19,4 +37,8 @@
 
 void pr_stat(const char *fmt, ...);
 
+int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+
+int perf_debug_option(const char *str);
+
 #endif	/* __PERF_DEBUG_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 819f104..90d02c66 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -216,7 +216,7 @@
 {
 	int fd = __open_dso(dso, machine);
 
-	if (fd > 0) {
+	if (fd >= 0) {
 		dso__list_add(dso);
 		/*
 		 * Check if we crossed the allowed number
@@ -331,26 +331,44 @@
 	};
 	int i = 0;
 
+	if (dso->data.status == DSO_DATA_STATUS_ERROR)
+		return -1;
+
 	if (dso->data.fd >= 0)
-		return dso->data.fd;
+		goto out;
 
 	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
 		dso->data.fd = open_dso(dso, machine);
-		return dso->data.fd;
+		goto out;
 	}
 
 	do {
-		int fd;
-
 		dso->binary_type = binary_type_data[i++];
 
-		fd = open_dso(dso, machine);
-		if (fd >= 0)
-			return dso->data.fd = fd;
+		dso->data.fd = open_dso(dso, machine);
+		if (dso->data.fd >= 0)
+			goto out;
 
 	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
+out:
+	if (dso->data.fd >= 0)
+		dso->data.status = DSO_DATA_STATUS_OK;
+	else
+		dso->data.status = DSO_DATA_STATUS_ERROR;
 
-	return -EINVAL;
+	return dso->data.fd;
+}
+
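+/*
+ * Report whether the dso's data status was already seen by the given
+ * consumer: the first call for each consumer marks it seen and returns
+ * false, so e.g. a warning can be emitted exactly once.
+ */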
+bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
+{
+	u32 flag = 1 << by;
+
+	if (dso->data.status_seen & flag)
+		return true;
+
+	dso->data.status_seen |= flag;
+
+	return false;
 }
 
 static void
@@ -526,6 +544,28 @@
 	return 0;
 }
 
+/**
+ * dso__data_size - Return dso data size
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * Return: dso data size
+ */
+off_t dso__data_size(struct dso *dso, struct machine *machine)
+{
+	int fd;
+
+	fd = dso__data_fd(dso, machine);
+	if (fd < 0)
+		return fd;
+
+	if (data_file_size(dso))
+		return -1;
+
+	/* For now, just estimate the dso data size to be the file size */
+	return dso->data.file_size;
+}
+
 static ssize_t data_read_offset(struct dso *dso, u64 offset,
 				u8 *data, ssize_t size)
 {
@@ -701,8 +741,10 @@
 			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
 		dso->data.cache = RB_ROOT;
 		dso->data.fd = -1;
+		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
 		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
 		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
+		dso->is_64_bit = (sizeof(void *) == 8);
 		dso->loaded = 0;
 		dso->rel = 0;
 		dso->sorted_by_name = 0;
@@ -898,3 +940,14 @@
 
 	return ret;
 }
+
+enum dso_type dso__type(struct dso *dso, struct machine *machine)
+{
+	int fd;
+
+	fd = dso__data_fd(dso, machine);
+	if (fd < 0)
+		return DSO__TYPE_UNKNOWN;
+
+	return dso__type_fd(fd);
+}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index ad553ba..5e463c0 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -5,6 +5,7 @@
 #include <linux/rbtree.h>
 #include <stdbool.h>
 #include <linux/types.h>
+#include <linux/bitops.h>
 #include "map.h"
 #include "build-id.h"
 
@@ -40,6 +41,23 @@
 	DSO_SWAP__YES,
 };
 
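+/* Tri-state cache of whether dso->data.fd could be opened */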
+enum dso_data_status {
+	DSO_DATA_STATUS_ERROR	= -1,
+	DSO_DATA_STATUS_UNKNOWN	= 0,
+	DSO_DATA_STATUS_OK	= 1,
+};
+
+enum dso_data_status_seen {
+	DSO_DATA_STATUS_SEEN_ITRACE,
+};
+
+enum dso_type {
+	DSO__TYPE_UNKNOWN,
+	DSO__TYPE_64BIT,
+	DSO__TYPE_32BIT,
+	DSO__TYPE_X32BIT,
+};
+
 #define DSO__SWAP(dso, type, val)			\
 ({							\
 	type ____r = val;				\
@@ -90,6 +108,7 @@
 	u8		 annotate_warned:1;
 	u8		 short_name_allocated:1;
 	u8		 long_name_allocated:1;
+	u8		 is_64_bit:1;
 	u8		 sorted_by_name;
 	u8		 loaded;
 	u8		 rel;
@@ -103,6 +122,8 @@
 	struct {
 		struct rb_root	 cache;
 		int		 fd;
+		int		 status;
+		u32		 status_seen;
 		size_t		 file_size;
 		struct list_head open_entry;
 	} data;
@@ -153,6 +174,7 @@
  * The dso__data_* external interface provides following functions:
  *   dso__data_fd
  *   dso__data_close
+ *   dso__data_size
  *   dso__data_read_offset
  *   dso__data_read_addr
  *
@@ -190,11 +212,13 @@
 int dso__data_fd(struct dso *dso, struct machine *machine);
 void dso__data_close(struct dso *dso);
 
+off_t dso__data_size(struct dso *dso, struct machine *machine);
 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
 			      u64 offset, u8 *data, ssize_t size);
 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
 			    struct machine *machine, u64 addr,
 			    u8 *data, ssize_t size);
+bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by);
 
 struct map *dso__new_map(const char *name);
 struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
@@ -229,4 +253,6 @@
 
 void dso__free_a2l(struct dso *dso);
 
+enum dso_type dso__type(struct dso *dso, struct machine *machine);
+
 #endif /* __PERF_DSO */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index d0281bd..1398c83 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -603,7 +603,14 @@
 
 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
 {
-	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+	const char *s;
+
+	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
+		s = " exec";
+	else
+		s = "";
+
+	return fprintf(fp, "%s: %s:%d\n", s, event->comm.comm, event->comm.tid);
 }
 
 int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
@@ -781,6 +788,7 @@
 		    cpumode == PERF_RECORD_MISC_USER &&
 		    machine && mg != &machine->kmaps) {
 			mg = &machine->kmaps;
+			load_map = true;
 			goto try_again;
 		}
 	} else {
@@ -866,3 +874,45 @@
 
 	return 0;
 }
+
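+/*
+ * Heuristic: BTS sessions show up as hardware branch-instructions
+ * events with a sample period of 1.
+ */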
+bool is_bts_event(struct perf_event_attr *attr)
+{
+	return attr->type == PERF_TYPE_HARDWARE &&
+	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
+	       attr->sample_period == 1;
+}
+
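+/*
+ * sample->addr can be resolved to a symbol only for page-fault
+ * software events and for BTS events.
+ */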
+bool sample_addr_correlates_sym(struct perf_event_attr *attr)
+{
+	if (attr->type == PERF_TYPE_SOFTWARE &&
+	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
+	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
+	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
+		return true;
+
+	if (is_bts_event(attr))
+		return true;
+
+	return false;
+}
+
+void perf_event__preprocess_sample_addr(union perf_event *event,
+					struct perf_sample *sample,
+					struct machine *machine,
+					struct thread *thread,
+					struct addr_location *al)
+{
+	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      sample->addr, al);
+	if (!al->map)
+		thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
+				      sample->addr, al);
+
+	al->cpu = sample->cpu;
+	al->sym = NULL;
+
+	if (al->map)
+		al->sym = map__find_symbol(al->map, al->addr, NULL);
+}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index e5dd40a..94d6976 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -288,6 +288,16 @@
 				  struct addr_location *al,
 				  struct perf_sample *sample);
 
+struct thread;
+
+bool is_bts_event(struct perf_event_attr *attr);
+bool sample_addr_correlates_sym(struct perf_event_attr *attr);
+void perf_event__preprocess_sample_addr(union perf_event *event,
+					struct perf_sample *sample,
+					struct machine *machine,
+					struct thread *thread,
+					struct addr_location *al);
+
 const char *perf_event__name(unsigned int id);
 
 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 59ef280..814e954 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -606,12 +606,17 @@
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist,
-			       int idx, int prot, int mask, int fd)
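+/*
+ * Bundle the mmap prot and mask so that the per-cpu/per-thread
+ * helpers below can take them as a single argument.
+ */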
+struct mmap_params {
+	int prot;
+	int mask;
+};
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
+			       struct mmap_params *mp, int fd)
 {
 	evlist->mmap[idx].prev = 0;
-	evlist->mmap[idx].mask = mask;
-	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+	evlist->mmap[idx].mask = mp->mask;
+	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
 				      MAP_SHARED, fd, 0);
 	if (evlist->mmap[idx].base == MAP_FAILED) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
@@ -625,8 +630,8 @@
 }
 
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
-				       int prot, int mask, int cpu, int thread,
-				       int *output)
+				       struct mmap_params *mp, int cpu,
+				       int thread, int *output)
 {
 	struct perf_evsel *evsel;
 
@@ -635,8 +640,7 @@
 
 		if (*output == -1) {
 			*output = fd;
-			if (__perf_evlist__mmap(evlist, idx, prot, mask,
-						*output) < 0)
+			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
@@ -651,8 +655,8 @@
 	return 0;
 }
 
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
-				     int mask)
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
+				     struct mmap_params *mp)
 {
 	int cpu, thread;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -663,8 +667,8 @@
 		int output = -1;
 
 		for (thread = 0; thread < nr_threads; thread++) {
-			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
-							cpu, thread, &output))
+			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
+							thread, &output))
 				goto out_unmap;
 		}
 	}
@@ -677,8 +681,8 @@
 	return -1;
 }
 
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
-					int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
+					struct mmap_params *mp)
 {
 	int thread;
 	int nr_threads = thread_map__nr(evlist->threads);
@@ -687,8 +691,8 @@
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
 
-		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
-						thread, &output))
+		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
+						&output))
 			goto out_unmap;
 	}
 
@@ -793,7 +797,9 @@
 	struct perf_evsel *evsel;
 	const struct cpu_map *cpus = evlist->cpus;
 	const struct thread_map *threads = evlist->threads;
-	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+	struct mmap_params mp = {
+		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
+	};
 
 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 		return -ENOMEM;
@@ -804,7 +810,7 @@
 	evlist->overwrite = overwrite;
 	evlist->mmap_len = perf_evlist__mmap_size(pages);
 	pr_debug("mmap size %zuB\n", evlist->mmap_len);
-	mask = evlist->mmap_len - page_size - 1;
+	mp.mask = evlist->mmap_len - page_size - 1;
 
 	evlist__for_each(evlist, evsel) {
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -814,9 +820,9 @@
 	}
 
 	if (cpu_map__empty(cpus))
-		return perf_evlist__mmap_per_thread(evlist, prot, mask);
+		return perf_evlist__mmap_per_thread(evlist, &mp);
 
-	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
+	return perf_evlist__mmap_per_cpu(evlist, &mp);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1214,10 +1220,11 @@
 					     "For your workloads it needs to be <= 1\nHint:\t");
 		}
 		printed += scnprintf(buf + printed, size - printed,
-				     "For system wide tracing it needs to be set to -1");
+				     "For system wide tracing it needs to be set to -1.\n");
 
 		printed += scnprintf(buf + printed, size - printed,
-				    ".\nHint:\tThe current value is %d.", value);
+				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
+				    "Hint:\tThe current value is %d.", value);
 		break;
 	default:
 		scnprintf(buf, size, "%s", emsg);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 8606175..21a373e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -29,6 +29,7 @@
 	bool sample_id_all;
 	bool exclude_guest;
 	bool mmap2;
+	bool cloexec;
 } perf_missing_features;
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
@@ -623,7 +624,7 @@
 		attr->mmap_data = track;
 	}
 
-	if (opts->call_graph_enabled)
+	if (opts->call_graph_enabled && !evsel->no_aux_samples)
 		perf_evsel__config_callgraph(evsel, opts);
 
 	if (target__has_cpu(&opts->target))
@@ -637,7 +638,7 @@
 	     target__has_cpu(&opts->target) || per_cpu))
 		perf_evsel__set_sample_bit(evsel, TIME);
 
-	if (opts->raw_samples) {
+	if (opts->raw_samples && !evsel->no_aux_samples) {
 		perf_evsel__set_sample_bit(evsel, TIME);
 		perf_evsel__set_sample_bit(evsel, RAW);
 		perf_evsel__set_sample_bit(evsel, CPU);
@@ -650,7 +651,7 @@
 		attr->watermark = 0;
 		attr->wakeup_events = 1;
 	}
-	if (opts->branch_stack) {
+	if (opts->branch_stack && !evsel->no_aux_samples) {
 		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
 		attr->branch_sample_type = opts->branch_stack;
 	}
@@ -681,6 +682,11 @@
 	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
 		!opts->initial_delay)
 		attr->enable_on_exec = 1;
+
+	if (evsel->immediate) {
+		attr->disabled = 0;
+		attr->enable_on_exec = 0;
+	}
 }
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -960,6 +966,7 @@
 	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
 	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
 	ret += PRINT_ATTR2(mmap, comm);
+	ret += PRINT_ATTR2(mmap2, comm_exec);
 	ret += PRINT_ATTR2(freq, inherit_stat);
 	ret += PRINT_ATTR2(enable_on_exec, task);
 	ret += PRINT_ATTR2(watermark, precise_ip);
@@ -967,7 +974,6 @@
 	ret += PRINT_ATTR2(exclude_host, exclude_guest);
 	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
 			    "excl.callchain_user", exclude_callchain_user);
-	ret += PRINT_ATTR_U32(mmap2);
 
 	ret += PRINT_ATTR_U32(wakeup_events);
 	ret += PRINT_ATTR_U32(wakeup_watermark);
@@ -989,7 +995,7 @@
 			      struct thread_map *threads)
 {
 	int cpu, thread;
-	unsigned long flags = 0;
+	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
 	int pid = -1, err;
 	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
 
@@ -998,11 +1004,13 @@
 		return -ENOMEM;
 
 	if (evsel->cgrp) {
-		flags = PERF_FLAG_PID_CGROUP;
+		flags |= PERF_FLAG_PID_CGROUP;
 		pid = evsel->cgrp->fd;
 	}
 
 fallback_missing_features:
+	if (perf_missing_features.cloexec)
+		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
 	if (perf_missing_features.mmap2)
 		evsel->attr.mmap2 = 0;
 	if (perf_missing_features.exclude_guest)
@@ -1071,7 +1079,10 @@
 	if (err != -EINVAL || cpu > 0 || thread > 0)
 		goto out_close;
 
-	if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
+	if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
+		perf_missing_features.cloexec = true;
+		goto fallback_missing_features;
+	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
 		perf_missing_features.mmap2 = true;
 		goto fallback_missing_features;
 	} else if (!perf_missing_features.exclude_guest &&
@@ -1940,6 +1951,7 @@
 		if_print(mmap);
 		if_print(mmap2);
 		if_print(comm);
+		if_print(comm_exec);
 		if_print(freq);
 		if_print(inherit_stat);
 		if_print(enable_on_exec);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a52e9a5..d7f93ce 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,6 +83,8 @@
 	int			is_pos;
 	bool 			supported;
 	bool 			needs_swap;
+	bool			no_aux_samples;
+	bool			immediate;
 	/* parse modifier helper */
 	int			exclude_GH;
 	int			nr_members;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 893f8e2..158c787 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -200,6 +200,47 @@
 	return write_padded(fd, name, name_len + 1, len);
 }
 
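+/*
+ * Mark every dso as hit, so that all of them end up in the build-id
+ * table instead of only the ones that were sampled.
+ */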
+static int __dsos__hit_all(struct list_head *head)
+{
+	struct dso *pos;
+
+	list_for_each_entry(pos, head, node)
+		pos->hit = true;
+
+	return 0;
+}
+
+static int machine__hit_all_dsos(struct machine *machine)
+{
+	int err;
+
+	err = __dsos__hit_all(&machine->kernel_dsos);
+	if (err)
+		return err;
+
+	return __dsos__hit_all(&machine->user_dsos);
+}
+
+int dsos__hit_all(struct perf_session *session)
+{
+	struct rb_node *nd;
+	int err;
+
+	err = machine__hit_all_dsos(&session->machines.host);
+	if (err)
+		return err;
+
+	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+		struct machine *pos = rb_entry(nd, struct machine, rb_node);
+
+		err = machine__hit_all_dsos(pos);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int __dsos__write_buildid_table(struct list_head *head,
 				       struct machine *machine,
 				       pid_t pid, u16 misc, int fd)
@@ -215,9 +256,9 @@
 		if (!pos->hit)
 			continue;
 
-		if (is_vdso_map(pos->short_name)) {
-			name = (char *) VDSO__MAP_NAME;
-			name_len = sizeof(VDSO__MAP_NAME) + 1;
+		if (dso__is_vdso(pos)) {
+			name = pos->short_name;
+			name_len = pos->short_name_len + 1;
 		} else if (dso__is_kcore(pos)) {
 			machine__mmap_name(machine, nm, sizeof(nm));
 			name = nm;
@@ -298,7 +339,7 @@
 
 	len = scnprintf(filename, size, "%s%s%s",
 		       debugdir, slash ? "/" : "",
-		       is_vdso ? VDSO__MAP_NAME : realname);
+		       is_vdso ? DSO__NAME_VDSO : realname);
 	if (mkdir_p(filename, 0755))
 		goto out_free;
 
@@ -386,7 +427,7 @@
 			       const char *debugdir)
 {
 	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
-	bool is_vdso = is_vdso_map(dso->short_name);
+	bool is_vdso = dso__is_vdso(dso);
 	const char *name = dso->long_name;
 	char nm[PATH_MAX];
 
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d08cfe4..8f5cbae 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -151,6 +151,8 @@
 				 struct perf_session *session);
 bool is_perf_magic(u64 magic);
 
+int dsos__hit_all(struct perf_session *session);
+
 /*
  * arch specific callback
  */
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index 9844c31..09e8e7a 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -94,27 +94,6 @@
 	return (i >= ssize) ? (ssize - 1) : i;
 }
 
-int eprintf(int level,
-	    const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-
-#ifndef pr_fmt
-#define pr_fmt(fmt) fmt
-#endif
-
-#define pr_err(fmt, ...) \
-	eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning(fmt, ...) \
-	eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) \
-	eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug(fmt, ...) \
-	eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debugN(n, fmt, ...) \
-	eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
-
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
new file mode 100644
index 0000000..0b5a8cd
--- /dev/null
+++ b/tools/perf/util/kvm-stat.h
@@ -0,0 +1,140 @@
+#ifndef __PERF_KVM_STAT_H
+#define __PERF_KVM_STAT_H
+
+#include "../perf.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "session.h"
+#include "tool.h"
+#include "stat.h"
+
+struct event_key {
+	#define INVALID_KEY     (~0ULL)
+	u64 key;
+	int info;
+	struct exit_reasons_table *exit_reasons;
+};
+
+struct kvm_event_stats {
+	u64 time;
+	struct stats stats;
+};
+
+struct kvm_event {
+	struct list_head hash_entry;
+	struct rb_node rb;
+
+	struct event_key key;
+
+	struct kvm_event_stats total;
+
+	#define DEFAULT_VCPU_NUM 8
+	int max_vcpu;
+	struct kvm_event_stats *vcpu;
+};
+
+typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
+
+struct kvm_event_key {
+	const char *name;
+	key_cmp_fun key;
+};
+
+struct perf_kvm_stat;
+
+struct child_event_ops {
+	void (*get_key)(struct perf_evsel *evsel,
+			struct perf_sample *sample,
+			struct event_key *key);
+	const char *name;
+};
+
+struct kvm_events_ops {
+	bool (*is_begin_event)(struct perf_evsel *evsel,
+			       struct perf_sample *sample,
+			       struct event_key *key);
+	bool (*is_end_event)(struct perf_evsel *evsel,
+			     struct perf_sample *sample, struct event_key *key);
+	struct child_event_ops *child_ops;
+	void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
+			   char *decode);
+	const char *name;
+};
+
+struct exit_reasons_table {
+	unsigned long exit_code;
+	const char *reason;
+};
+
+#define EVENTS_BITS		12
+#define EVENTS_CACHE_SIZE	(1UL << EVENTS_BITS)
+
+struct perf_kvm_stat {
+	struct perf_tool    tool;
+	struct record_opts  opts;
+	struct perf_evlist  *evlist;
+	struct perf_session *session;
+
+	const char *file_name;
+	const char *report_event;
+	const char *sort_key;
+	int trace_vcpu;
+
+	struct exit_reasons_table *exit_reasons;
+	const char *exit_reasons_isa;
+
+	struct kvm_events_ops *events_ops;
+	key_cmp_fun compare;
+	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
+
+	u64 total_time;
+	u64 total_count;
+	u64 lost_events;
+	u64 duration;
+
+	const char *pid_str;
+	struct intlist *pid_list;
+
+	struct rb_root result;
+
+	int timerfd;
+	unsigned int display_time;
+	bool live;
+};
+
+struct kvm_reg_events_ops {
+	const char *name;
+	struct kvm_events_ops *ops;
+};
+
+void exit_event_get_key(struct perf_evsel *evsel,
+			struct perf_sample *sample,
+			struct event_key *key);
+bool exit_event_begin(struct perf_evsel *evsel,
+		      struct perf_sample *sample,
+		      struct event_key *key);
+bool exit_event_end(struct perf_evsel *evsel,
+		    struct perf_sample *sample,
+		    struct event_key *key);
+void exit_event_decode_key(struct perf_kvm_stat *kvm,
+			   struct event_key *key,
+			   char *decode);
+
+bool kvm_exit_event(struct perf_evsel *evsel);
+bool kvm_entry_event(struct perf_evsel *evsel);
+
+#define define_exit_reasons_table(name, symbols)	\
+	static struct exit_reasons_table name[] = {	\
+		symbols, { -1, NULL }			\
+	}
+
+/*
+ * arch specific callbacks and data structures
+ */
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
+
+extern const char * const kvm_events_tp[];
+extern struct kvm_reg_events_ops kvm_reg_events_ops[];
+extern const char * const kvm_skip_events[];
+
+#endif /* __PERF_KVM_STAT_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c73e1fc..16bba9f 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -8,6 +8,7 @@
 #include "sort.h"
 #include "strlist.h"
 #include "thread.h"
+#include "vdso.h"
 #include <stdbool.h>
 #include <symbol/kallsyms.h>
 #include "unwind.h"
@@ -23,6 +24,8 @@
 	INIT_LIST_HEAD(&machine->dead_threads);
 	machine->last_match = NULL;
 
+	machine->vdso_info = NULL;
+
 	machine->kmaps.machine = machine;
 	machine->pid = pid;
 
@@ -34,7 +37,7 @@
 		return -ENOMEM;
 
 	if (pid != HOST_KERNEL_ID) {
-		struct thread *thread = machine__findnew_thread(machine, 0,
+		struct thread *thread = machine__findnew_thread(machine, -1,
 								pid);
 		char comm[64];
 
@@ -45,6 +48,8 @@
 		thread__set_comm(thread, comm, 0);
 	}
 
+	machine->current_tid = NULL;
+
 	return 0;
 }
 
@@ -103,7 +108,9 @@
 	map_groups__exit(&machine->kmaps);
 	dsos__delete(&machine->user_dsos);
 	dsos__delete(&machine->kernel_dsos);
+	vdso__exit(machine);
 	zfree(&machine->root_dir);
+	zfree(&machine->current_tid);
 }
 
 void machine__delete(struct machine *machine)
@@ -272,6 +279,52 @@
 	return;
 }
 
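+/*
+ * Once a thread's pid becomes known, record it and share the map
+ * groups of its thread group leader with it.
+ */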
+static void machine__update_thread_pid(struct machine *machine,
+				       struct thread *th, pid_t pid)
+{
+	struct thread *leader;
+
+	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
+		return;
+
+	th->pid_ = pid;
+
+	if (th->pid_ == th->tid)
+		return;
+
+	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
+	if (!leader)
+		goto out_err;
+
+	if (!leader->mg)
+		leader->mg = map_groups__new();
+
+	if (!leader->mg)
+		goto out_err;
+
+	if (th->mg == leader->mg)
+		return;
+
+	if (th->mg) {
+		/*
+		 * Maps are created from MMAP events which provide the pid and
+		 * tid.  Consequently there never should be any maps on a thread
+		 * with an unknown pid.  Just print an error if there are.
+		 */
+		if (!map_groups__empty(th->mg))
+			pr_err("Discarding thread maps for %d:%d\n",
+			       th->pid_, th->tid);
+		map_groups__delete(th->mg);
+	}
+
+	th->mg = map_groups__get(leader->mg);
+
+	return;
+
+out_err:
+	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
+}
+
 static struct thread *__machine__findnew_thread(struct machine *machine,
 						pid_t pid, pid_t tid,
 						bool create)
@@ -285,10 +338,10 @@
 	 * so most of the time we don't have to look up
 	 * the full rbtree:
 	 */
-	if (machine->last_match && machine->last_match->tid == tid) {
-		if (pid && pid != machine->last_match->pid_)
-			machine->last_match->pid_ = pid;
-		return machine->last_match;
+	th = machine->last_match;
+	if (th && th->tid == tid) {
+		machine__update_thread_pid(machine, th, pid);
+		return th;
 	}
 
 	while (*p != NULL) {
@@ -297,8 +350,7 @@
 
 		if (th->tid == tid) {
 			machine->last_match = th;
-			if (pid && pid != th->pid_)
-				th->pid_ = pid;
+			machine__update_thread_pid(machine, th, pid);
 			return th;
 		}
 
@@ -325,8 +377,10 @@
 		 * within thread__init_map_groups to find the thread
 	 * leader and that would screw up the rb tree.
 		 */
-		if (thread__init_map_groups(th, machine))
+		if (thread__init_map_groups(th, machine)) {
+			thread__delete(th);
 			return NULL;
+		}
 	}
 
 	return th;
@@ -1045,14 +1099,14 @@
 	else
 		type = MAP__FUNCTION;
 
-	map = map__new(&machine->user_dsos, event->mmap2.start,
+	map = map__new(machine, event->mmap2.start,
 			event->mmap2.len, event->mmap2.pgoff,
 			event->mmap2.pid, event->mmap2.maj,
 			event->mmap2.min, event->mmap2.ino,
 			event->mmap2.ino_generation,
 			event->mmap2.prot,
 			event->mmap2.flags,
-			event->mmap2.filename, type);
+			event->mmap2.filename, type, thread);
 
 	if (map == NULL)
 		goto out_problem;
@@ -1095,11 +1149,11 @@
 	else
 		type = MAP__FUNCTION;
 
-	map = map__new(&machine->user_dsos, event->mmap.start,
+	map = map__new(machine, event->mmap.start,
 			event->mmap.len, event->mmap.pgoff,
 			event->mmap.pid, 0, 0, 0, 0, 0, 0,
 			event->mmap.filename,
-			type);
+			type, thread);
 
 	if (map == NULL)
 		goto out_problem;
@@ -1281,7 +1335,9 @@
 	u8 cpumode = PERF_RECORD_MISC_USER;
 	int chain_nr = min(max_stack, (int)chain->nr);
 	int i;
+	int j;
 	int err;
+	int skip_idx __maybe_unused;
 
 	callchain_cursor_reset(&callchain_cursor);
 
@@ -1290,14 +1346,26 @@
 		return 0;
 	}
 
+	/*
+	 * Based on DWARF debug information, some architectures skip
+	 * a callchain entry saved by the kernel.
+	 */
+	skip_idx = arch_skip_callchain_idx(machine, thread, chain);
+
 	for (i = 0; i < chain_nr; i++) {
 		u64 ip;
 		struct addr_location al;
 
 		if (callchain_param.order == ORDER_CALLEE)
-			ip = chain->ips[i];
+			j = i;
 		else
-			ip = chain->ips[chain->nr - i - 1];
+			j = chain->nr - i - 1;
+
+#ifdef HAVE_SKIP_CALLCHAIN_IDX
+		if (j == skip_idx)
+			continue;
+#endif
+		ip = chain->ips[j];
 
 		if (ip >= PERF_CONTEXT_MAX) {
 			switch (ip) {
@@ -1420,3 +1488,46 @@
 	/* command specified */
 	return 0;
 }
+
+pid_t machine__get_current_tid(struct machine *machine, int cpu)
+{
+	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
+		return -1;
+
+	return machine->current_tid[cpu];
+}
+
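+/*
+ * Record which tid is currently running on a cpu, creating the
+ * corresponding thread if it does not exist yet.
+ */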
+int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+			     pid_t tid)
+{
+	struct thread *thread;
+
+	if (cpu < 0)
+		return -EINVAL;
+
+	if (!machine->current_tid) {
+		int i;
+
+		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
+		if (!machine->current_tid)
+			return -ENOMEM;
+		for (i = 0; i < MAX_NR_CPUS; i++)
+			machine->current_tid[i] = -1;
+	}
+
+	if (cpu >= MAX_NR_CPUS) {
+		pr_err("Requested CPU %d too large. ", cpu);
+		pr_err("Consider raising MAX_NR_CPUS\n");
+		return -EINVAL;
+	}
+
+	machine->current_tid[cpu] = tid;
+
+	thread = machine__findnew_thread(machine, pid, tid);
+	if (!thread)
+		return -ENOMEM;
+
+	thread->cpu = cpu;
+
+	return 0;
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index c8c74a1..b972824 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -20,6 +20,8 @@
 
 extern const char *ref_reloc_sym_names[];
 
+struct vdso_info;
+
 struct machine {
 	struct rb_node	  rb_node;
 	pid_t		  pid;
@@ -28,11 +30,13 @@
 	struct rb_root	  threads;
 	struct list_head  dead_threads;
 	struct thread	  *last_match;
+	struct vdso_info  *vdso_info;
 	struct list_head  user_dsos;
 	struct list_head  kernel_dsos;
 	struct map_groups kmaps;
 	struct map	  *vmlinux_maps[MAP__NR_TYPES];
 	symbol_filter_t	  symbol_filter;
+	pid_t		  *current_tid;
 };
 
 static inline
@@ -191,4 +195,8 @@
 					     perf_event__process, data_mmap);
 }
 
+pid_t machine__get_current_tid(struct machine *machine, int cpu);
+int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+			     pid_t tid);
+
 #endif /* __PERF_MACHINE_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 25c571f..31b8905 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -12,6 +12,8 @@
 #include "vdso.h"
 #include "build-id.h"
 #include "util.h"
+#include "debug.h"
+#include "machine.h"
 #include <linux/string.h>
 
 const char *map_type__name[MAP__NR_TYPES] = {
@@ -136,10 +138,10 @@
 	map->erange_warned = false;
 }
 
-struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+struct map *map__new(struct machine *machine, u64 start, u64 len,
 		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
 		     u64 ino_gen, u32 prot, u32 flags, char *filename,
-		     enum map_type type)
+		     enum map_type type, struct thread *thread)
 {
 	struct map *map = malloc(sizeof(*map));
 
@@ -172,9 +174,9 @@
 
 		if (vdso) {
 			pgoff = 0;
-			dso = vdso__dso_findnew(dsos__list);
+			dso = vdso__dso_findnew(machine, thread);
 		} else
-			dso = __dsos__findnew(dsos__list, filename);
+			dso = __dsos__findnew(&machine->user_dsos, filename);
 
 		if (dso == NULL)
 			goto out_delete;
@@ -454,6 +456,20 @@
 	}
 }
 
+bool map_groups__empty(struct map_groups *mg)
+{
+	int i;
+
+	for (i = 0; i < MAP__NR_TYPES; ++i) {
+		if (maps__first(&mg->maps[i]))
+			return false;
+		if (!list_empty(&mg->removed_maps[i]))
+			return false;
+	}
+
+	return true;
+}
+
 struct map_groups *map_groups__new(void)
 {
 	struct map_groups *mg = malloc(sizeof(*mg));
@@ -554,8 +570,8 @@
 	return ams->sym ? 0 : -1;
 }
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg,
-				  enum map_type type, int verbose, FILE *fp)
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+				  FILE *fp)
 {
 	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
 	struct rb_node *nd;
@@ -573,17 +589,16 @@
 	return printed;
 }
 
-size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
+static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
 {
 	size_t printed = 0, i;
 	for (i = 0; i < MAP__NR_TYPES; ++i)
-		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
+		printed += __map_groups__fprintf_maps(mg, i, fp);
 	return printed;
 }
 
 static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
-						 enum map_type type,
-						 int verbose, FILE *fp)
+						 enum map_type type, FILE *fp)
 {
 	struct map *pos;
 	size_t printed = 0;
@@ -600,23 +615,23 @@
 }
 
 static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
-					       int verbose, FILE *fp)
+					       FILE *fp)
 {
 	size_t printed = 0, i;
 	for (i = 0; i < MAP__NR_TYPES; ++i)
-		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
+		printed += __map_groups__fprintf_removed_maps(mg, i, fp);
 	return printed;
 }
 
-size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 {
-	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
+	size_t printed = map_groups__fprintf_maps(mg, fp);
 	printed += fprintf(fp, "Removed maps:\n");
-	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
+	return printed + map_groups__fprintf_removed_maps(mg, fp);
 }
 
 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
-				   int verbose, FILE *fp)
+				   FILE *fp)
 {
 	struct rb_root *root = &mg->maps[map->type];
 	struct rb_node *next = rb_first(root);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 7758c72..2f83954 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -66,6 +66,7 @@
 
 struct map_groups *map_groups__new(void);
 void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
 
 static inline struct map_groups *map_groups__get(struct map_groups *mg)
 {
@@ -103,6 +104,7 @@
 u64 map__objdump_2mem(struct map *map, u64 ip);
 
 struct symbol;
+struct thread;
 
 /* map__for_each_symbol - iterate over the symbols in the given map
  *
@@ -118,10 +120,10 @@
 
 void map__init(struct map *map, enum map_type type,
 	       u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+struct map *map__new(struct machine *machine, u64 start, u64 len,
 		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
 		     u64 ino_gen, u32 prot, u32 flags,
-		     char *filename, enum map_type type);
+		     char *filename, enum map_type type, struct thread *thread);
 struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
 void map__delete(struct map *map);
 struct map *map__clone(struct map *map);
@@ -141,8 +143,8 @@
 
 void map__reloc_vmlinux(struct map *map);
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg,
-				  enum map_type type, int verbose, FILE *fp);
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+				  FILE *fp);
 void maps__insert(struct rb_root *maps, struct map *map);
 void maps__remove(struct rb_root *maps, struct map *map);
 struct map *maps__find(struct rb_root *maps, u64 addr);
@@ -152,8 +154,7 @@
 void map_groups__exit(struct map_groups *mg);
 int map_groups__clone(struct map_groups *mg,
 		      struct map_groups *parent, enum map_type type);
-size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp);
-size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
 
 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
 				     u64 addr);
@@ -210,7 +211,7 @@
 }
 
 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
-				   int verbose, FILE *fp);
+				   FILE *fp);
 
 struct map *map_groups__find_by_name(struct map_groups *mg,
 				     enum map_type type, const char *name);
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index d8dac8a..b59ba85 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -98,6 +98,7 @@
 	parse_opt_cb *callback;
 	intptr_t defval;
 	bool *set;
+	void *data;
 };
 
 #define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
@@ -131,6 +132,10 @@
 	{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\
 	.value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
 	.flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG}
+#define OPT_CALLBACK_OPTARG(s, l, v, d, a, h, f) \
+	{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), \
+	  .value = (v), (a), .help = (h), .callback = (f), \
+	  .flags = PARSE_OPT_OPTARG, .data = (d) }
 
 /* parse_options() will filter out the processed options and leave the
  * non-option arguments in argv[].
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 98e3047..dca9145 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -26,7 +26,6 @@
 #include <errno.h>
 #include <stdio.h>
 #include <unistd.h>
-#include <getopt.h>
 #include <stdlib.h>
 #include <string.h>
 #include <stdarg.h>
diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c
index daa17ae..a126e6c 100644
--- a/tools/perf/util/pstack.c
+++ b/tools/perf/util/pstack.c
@@ -6,6 +6,7 @@
 
 #include "util.h"
 #include "pstack.h"
+#include "debug.h"
 #include <linux/kernel.h>
 #include <stdlib.h>
 
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 122669c..12aa9b0 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -14,12 +14,12 @@
  */
 int verbose;
 
-int eprintf(int level, const char *fmt, ...)
+int eprintf(int level, int var, const char *fmt, ...)
 {
 	va_list args;
 	int ret = 0;
 
-	if (verbose >= level) {
+	if (var >= level) {
 		va_start(args, fmt);
 		ret = vfprintf(stderr, fmt, args);
 		va_end(args);
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 049e0a0..fe8079e 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -4,6 +4,7 @@
 #include "parse-events.h"
 #include <api/fs/fs.h>
 #include "util.h"
+#include "cloexec.h"
 
 typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
 
@@ -11,6 +12,7 @@
 {
 	struct perf_evlist *evlist;
 	struct perf_evsel *evsel;
+	unsigned long flags = perf_event_open_cloexec_flag();
 	int err = -EAGAIN, fd;
 
 	evlist = perf_evlist__new();
@@ -22,14 +24,14 @@
 
 	evsel = perf_evlist__first(evlist);
 
-	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags);
 	if (fd < 0)
 		goto out_delete;
 	close(fd);
 
 	fn(evsel);
 
-	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags);
 	if (fd < 0) {
 		if (errno == EINVAL)
 			err = -EINVAL;
@@ -69,15 +71,26 @@
 	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
 }
 
+static void perf_probe_comm_exec(struct perf_evsel *evsel)
+{
+	evsel->attr.comm_exec = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
 	return perf_probe_api(perf_probe_sample_identifier);
 }
 
+static bool perf_can_comm_exec(void)
+{
+	return perf_probe_api(perf_probe_comm_exec);
+}
+
 void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
 {
 	struct perf_evsel *evsel;
 	bool use_sample_identifier = false;
+	bool use_comm_exec;
 
 	/*
 	 * Set the evsel leader links before we configure attributes,
@@ -89,8 +102,13 @@
 	if (evlist->cpus->map[0] < 0)
 		opts->no_inherit = true;
 
-	evlist__for_each(evlist, evsel)
+	use_comm_exec = perf_can_comm_exec();
+
+	evlist__for_each(evlist, evsel) {
 		perf_evsel__config(evsel, opts);
+		if (!evsel->idx && use_comm_exec)
+			evsel->attr.comm_exec = 1;
+	}
 
 	if (evlist->nr_entries > 1) {
 		struct perf_evsel *first = perf_evlist__first(evlist);
@@ -203,7 +221,8 @@
 		cpu = evlist->cpus->map[0];
 	}
 
-	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1,
+				 perf_event_open_cloexec_flag());
 	if (fd >= 0) {
 		close(fd);
 		ret = true;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index af7da56..b2dba9c 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -34,6 +34,7 @@
 #include "../event.h"
 #include "../trace-event.h"
 #include "../evsel.h"
+#include "../debug.h"
 
 void boot_Perf__Trace__Context(pTHX_ CV *cv);
 void boot_DynaLoader(pTHX_ CV *cv);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 1c41932..cbce254 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -27,11 +27,13 @@
 #include <errno.h>
 
 #include "../../perf.h"
+#include "../debug.h"
 #include "../evsel.h"
 #include "../util.h"
 #include "../event.h"
 #include "../thread.h"
 #include "../trace-event.h"
+#include "../machine.h"
 
 PyMODINIT_FUNC initperf_trace_context(void);
 
@@ -50,10 +52,14 @@
 
 static PyObject *main_module, *main_dict;
 
+static void handler_call_die(const char *handler_name) NORETURN;
 static void handler_call_die(const char *handler_name)
 {
 	PyErr_Print();
 	Py_FatalError("problem in Python trace event handler");
+	/*
+	 * Py_FatalError() does not return, but we have to keep the
+	 * compiler happy about the NORETURN annotation.
+	 */
+	abort();
 }
 
 /*
@@ -97,6 +103,7 @@
 		retval = PyObject_CallObject(handler, t);
 		if (retval == NULL)
 			handler_call_die(handler_name);
+		Py_DECREF(retval);
 	}
 
 	Py_DECREF(t);
@@ -143,6 +150,7 @@
 		retval = PyObject_CallObject(handler, t);
 		if (retval == NULL)
 			handler_call_die(handler_name);
+		Py_DECREF(retval);
 	}
 
 	Py_DECREF(t);
@@ -231,15 +239,133 @@
 	return event;
 }
 
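+/*
+ * Convert a numeric (possibly array) tracepoint field into a Python
+ * int/long, or into a list of them for array fields.
+ */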
+static PyObject *get_field_numeric_entry(struct event_format *event,
+		struct format_field *field, void *data)
+{
+	bool is_array = field->flags & FIELD_IS_ARRAY;
+	PyObject *obj, *list = NULL;
+	unsigned long long val;
+	unsigned int item_size, n_items, i;
+
+	if (is_array) {
+		list = PyList_New(field->arraylen);
+		item_size = field->size / field->arraylen;
+		n_items = field->arraylen;
+	} else {
+		item_size = field->size;
+		n_items = 1;
+	}
+
+	for (i = 0; i < n_items; i++) {
+
+		val = read_size(event, data + field->offset + i * item_size,
+				item_size);
+		if (field->flags & FIELD_IS_SIGNED) {
+			if ((long long)val >= LONG_MIN &&
+					(long long)val <= LONG_MAX)
+				obj = PyInt_FromLong(val);
+			else
+				obj = PyLong_FromLongLong(val);
+		} else {
+			if (val <= LONG_MAX)
+				obj = PyInt_FromLong(val);
+			else
+				obj = PyLong_FromUnsignedLongLong(val);
+		}
+		if (is_array)
+			PyList_SET_ITEM(list, i, obj);
+	}
+	if (is_array)
+		obj = list;
+	return obj;
+}
+
+
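+/*
+ * Resolve the sample's callchain and convert it into a Python list of
+ * dicts carrying "ip" and, when resolvable, "sym" and "dso" entries;
+ * an empty list is returned when callchains are disabled or missing.
+ */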
+static PyObject *python_process_callchain(struct perf_sample *sample,
+					 struct perf_evsel *evsel,
+					 struct addr_location *al)
+{
+	PyObject *pylist;
+
+	pylist = PyList_New(0);
+	if (!pylist)
+		Py_FatalError("couldn't create Python list");
+
+	if (!symbol_conf.use_callchain || !sample->callchain)
+		goto exit;
+
+	if (machine__resolve_callchain(al->machine, evsel, al->thread,
+					   sample, NULL, NULL,
+					   PERF_MAX_STACK_DEPTH) != 0) {
+		pr_err("Failed to resolve callchain. Skipping\n");
+		goto exit;
+	}
+	callchain_cursor_commit(&callchain_cursor);
+
+	while (1) {
+		PyObject *pyelem;
+		struct callchain_cursor_node *node;
+		node = callchain_cursor_current(&callchain_cursor);
+		if (!node)
+			break;
+
+		pyelem = PyDict_New();
+		if (!pyelem)
+			Py_FatalError("couldn't create Python dictionary");
+
+		pydict_set_item_string_decref(pyelem, "ip",
+				PyLong_FromUnsignedLongLong(node->ip));
+
+		if (node->sym) {
+			PyObject *pysym  = PyDict_New();
+			if (!pysym)
+				Py_FatalError("couldn't create Python dictionary");
+			pydict_set_item_string_decref(pysym, "start",
+					PyLong_FromUnsignedLongLong(node->sym->start));
+			pydict_set_item_string_decref(pysym, "end",
+					PyLong_FromUnsignedLongLong(node->sym->end));
+			pydict_set_item_string_decref(pysym, "binding",
+					PyInt_FromLong(node->sym->binding));
+			pydict_set_item_string_decref(pysym, "name",
+					PyString_FromStringAndSize(node->sym->name,
+							node->sym->namelen));
+			pydict_set_item_string_decref(pyelem, "sym", pysym);
+		}
+
+		if (node->map) {
+			struct map *map = node->map;
+			const char *dsoname = "[unknown]";
+			if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+				if (symbol_conf.show_kernel_path && map->dso->long_name)
+					dsoname = map->dso->long_name;
+				else if (map->dso->name)
+					dsoname = map->dso->name;
+			}
+			pydict_set_item_string_decref(pyelem, "dso",
+					PyString_FromString(dsoname));
+		}
+
+		callchain_cursor_advance(&callchain_cursor);
+		PyList_Append(pylist, pyelem);
+		Py_DECREF(pyelem);
+	}
+
+exit:
+	return pylist;
+}
+
 static void python_process_tracepoint(struct perf_sample *sample,
 				      struct perf_evsel *evsel,
 				      struct thread *thread,
 				      struct addr_location *al)
 {
-	PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
+	PyObject *handler, *retval, *context, *t, *obj, *callchain;
+	PyObject *dict = NULL;
 	static char handler_name[256];
 	struct format_field *field;
-	unsigned long long val;
 	unsigned long s, ns;
 	struct event_format *event;
 	unsigned n = 0;
@@ -280,18 +406,23 @@
 	PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
 	PyTuple_SetItem(t, n++, context);
 
+	/* ip unwinding */
+	callchain = python_process_callchain(sample, evsel, al);
+
 	if (handler) {
 		PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
 		PyTuple_SetItem(t, n++, PyInt_FromLong(s));
 		PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
 		PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
 		PyTuple_SetItem(t, n++, PyString_FromString(comm));
+		PyTuple_SetItem(t, n++, callchain);
 	} else {
 		pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
 		pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
 		pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
 		pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
 		pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
+		pydict_set_item_string_decref(dict, "common_callchain", callchain);
 	}
 	for (field = event->format.fields; field; field = field->next) {
 		if (field->flags & FIELD_IS_STRING) {
@@ -303,20 +434,7 @@
 				offset = field->offset;
 			obj = PyString_FromString((char *)data + offset);
 		} else { /* FIELD_IS_NUMERIC */
-			val = read_size(event, data + field->offset,
-					field->size);
-			if (field->flags & FIELD_IS_SIGNED) {
-				if ((long long)val >= LONG_MIN &&
-				    (long long)val <= LONG_MAX)
-					obj = PyInt_FromLong(val);
-				else
-					obj = PyLong_FromLongLong(val);
-			} else {
-				if (val <= LONG_MAX)
-					obj = PyInt_FromLong(val);
-				else
-					obj = PyLong_FromUnsignedLongLong(val);
-			}
+			obj = get_field_numeric_entry(event, field, data);
 		}
 		if (handler)
 			PyTuple_SetItem(t, n++, obj);
@@ -324,6 +442,7 @@
 			pydict_set_item_string_decref(dict, field->name, obj);
 
 	}
+
 	if (!handler)
 		PyTuple_SetItem(t, n++, dict);
 
@@ -334,6 +453,7 @@
 		retval = PyObject_CallObject(handler, t);
 		if (retval == NULL)
 			handler_call_die(handler_name);
+		Py_DECREF(retval);
 	} else {
 		handler = PyDict_GetItemString(main_dict, "trace_unhandled");
 		if (handler && PyCallable_Check(handler)) {
@@ -341,6 +461,7 @@
 			retval = PyObject_CallObject(handler, t);
 			if (retval == NULL)
 				handler_call_die("trace_unhandled");
+			Py_DECREF(retval);
 		}
 		Py_DECREF(dict);
 	}
@@ -353,7 +474,7 @@
 					 struct thread *thread,
 					 struct addr_location *al)
 {
-	PyObject *handler, *retval, *t, *dict;
+	PyObject *handler, *retval, *t, *dict, *callchain, *dict_sample;
 	static char handler_name[64];
 	unsigned n = 0;
 
@@ -369,6 +490,10 @@
 	if (!dict)
 		Py_FatalError("couldn't create Python dictionary");
 
+	dict_sample = PyDict_New();
+	if (!dict_sample)
+		Py_FatalError("couldn't create Python dictionary");
+
 	snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
 
 	handler = PyDict_GetItemString(main_dict, handler_name);
@@ -378,8 +503,21 @@
 	pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
 	pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
 			(const char *)&evsel->attr, sizeof(evsel->attr)));
-	pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
-			(const char *)sample, sizeof(*sample)));
+
+	pydict_set_item_string_decref(dict_sample, "pid",
+			PyInt_FromLong(sample->pid));
+	pydict_set_item_string_decref(dict_sample, "tid",
+			PyInt_FromLong(sample->tid));
+	pydict_set_item_string_decref(dict_sample, "cpu",
+			PyInt_FromLong(sample->cpu));
+	pydict_set_item_string_decref(dict_sample, "ip",
+			PyLong_FromUnsignedLongLong(sample->ip));
+	pydict_set_item_string_decref(dict_sample, "time",
+			PyLong_FromUnsignedLongLong(sample->time));
+	pydict_set_item_string_decref(dict_sample, "period",
+			PyLong_FromUnsignedLongLong(sample->period));
+	pydict_set_item_string_decref(dict, "sample", dict_sample);
+
 	pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
 			(const char *)sample->raw_data, sample->raw_size));
 	pydict_set_item_string_decref(dict, "comm",
@@ -393,6 +531,10 @@
 			PyString_FromString(al->sym->name));
 	}
 
+	/* ip unwinding */
+	callchain = python_process_callchain(sample, evsel, al);
+	pydict_set_item_string_decref(dict, "callchain", callchain);
+
 	PyTuple_SetItem(t, n++, dict);
 	if (_PyTuple_Resize(&t, n) == -1)
 		Py_FatalError("error resizing Python tuple");
@@ -400,6 +542,7 @@
 	retval = PyObject_CallObject(handler, t);
 	if (retval == NULL)
 		handler_call_die(handler_name);
+	Py_DECREF(retval);
 exit:
 	Py_DECREF(dict);
 	Py_DECREF(t);
@@ -521,8 +664,7 @@
 	retval = PyObject_CallObject(handler, NULL);
 	if (retval == NULL)
 		handler_call_die("trace_end");
-	else
-		Py_DECREF(retval);
+	Py_DECREF(retval);
 out:
 	Py_XDECREF(main_dict);
 	Py_XDECREF(main_module);
@@ -589,6 +731,7 @@
 		fprintf(ofp, "common_nsecs, ");
 		fprintf(ofp, "common_pid, ");
 		fprintf(ofp, "common_comm,\n\t");
+		fprintf(ofp, "common_callchain, ");
 
 		not_first = 0;
 		count = 0;
@@ -632,7 +775,7 @@
 				fprintf(ofp, "%%u");
 		}
 
-		fprintf(ofp, "\\n\" %% \\\n\t\t(");
+		fprintf(ofp, "\" %% \\\n\t\t(");
 
 		not_first = 0;
 		count = 0;
@@ -668,7 +811,15 @@
 				fprintf(ofp, "%s", f->name);
 		}
 
-		fprintf(ofp, "),\n\n");
+		fprintf(ofp, ")\n\n");
+
+		fprintf(ofp, "\t\tfor node in common_callchain:");
+		fprintf(ofp, "\n\t\t\tif 'sym' in node:");
+		fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
+		fprintf(ofp, "\n\t\t\telse:");
+		fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n");
+		fprintf(ofp, "\t\tprint \"\\n\"\n\n");
+
 	}
 
 	fprintf(ofp, "def trace_unhandled(event_name, context, "
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 64a186e..88dfef7 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -14,7 +14,6 @@
 #include "util.h"
 #include "cpumap.h"
 #include "perf_regs.h"
-#include "vdso.h"
 
 static int perf_session__open(struct perf_session *session)
 {
@@ -156,7 +155,6 @@
 	if (session->file)
 		perf_data_file__close(session->file);
 	free(session);
-	vdso__exit();
 }
 
 static int process_event_synth_tracing_data_stub(struct perf_tool *tool
@@ -511,6 +509,7 @@
 		os->last_flush = iter->timestamp;
 		list_del(&iter->list);
 		list_add(&iter->list, &os->sample_cache);
+		os->nr_samples--;
 
 		if (show_progress)
 			ui_progress__update(&prog, 1);
@@ -523,8 +522,6 @@
 			list_entry(head->prev, struct sample_queue, list);
 	}
 
-	os->nr_samples = 0;
-
 	return 0;
 }
 
@@ -994,8 +991,10 @@
 	}
 }
 
-static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
-					    struct perf_tool *tool, u64 file_offset)
+static s64 perf_session__process_user_event(struct perf_session *session,
+					    union perf_event *event,
+					    struct perf_tool *tool,
+					    u64 file_offset)
 {
 	int fd = perf_data_file__fd(session->file);
 	int err;
@@ -1037,7 +1036,7 @@
 		swap(event, sample_id_all);
 }
 
-static int perf_session__process_event(struct perf_session *session,
+static s64 perf_session__process_event(struct perf_session *session,
 				       union perf_event *event,
 				       struct perf_tool *tool,
 				       u64 file_offset)
@@ -1083,13 +1082,14 @@
 
 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
 {
-	return machine__findnew_thread(&session->machines.host, 0, pid);
+	return machine__findnew_thread(&session->machines.host, -1, pid);
 }
 
 static struct thread *perf_session__register_idle_thread(struct perf_session *session)
 {
-	struct thread *thread = perf_session__findnew(session, 0);
+	struct thread *thread;
 
+	thread = machine__findnew_thread(&session->machines.host, 0, 0);
 	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
 		pr_err("problem inserting idle task.\n");
 		thread = NULL;
@@ -1147,7 +1147,7 @@
 	union perf_event *event;
 	uint32_t size, cur_size = 0;
 	void *buf = NULL;
-	int skip = 0;
+	s64 skip = 0;
 	u64 head;
 	ssize_t err;
 	void *p;
@@ -1276,13 +1276,13 @@
 				   u64 file_size, struct perf_tool *tool)
 {
 	int fd = perf_data_file__fd(session->file);
-	u64 head, page_offset, file_offset, file_pos;
+	u64 head, page_offset, file_offset, file_pos, size;
 	int err, mmap_prot, mmap_flags, map_idx = 0;
 	size_t	mmap_size;
 	char *buf, *mmaps[NUM_MMAPS];
 	union perf_event *event;
-	uint32_t size;
 	struct ui_progress prog;
+	s64 skip;
 
 	perf_tool__fill_defaults(tool);
 
@@ -1296,8 +1296,10 @@
 	ui_progress__init(&prog, file_size, "Processing events...");
 
 	mmap_size = MMAP_SIZE;
-	if (mmap_size > file_size)
+	if (mmap_size > file_size) {
 		mmap_size = file_size;
+		session->one_mmap = true;
+	}
 
 	memset(mmaps, 0, sizeof(mmaps));
 
@@ -1319,6 +1321,10 @@
 	mmaps[map_idx] = buf;
 	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
 	file_pos = file_offset + head;
+	if (session->one_mmap) {
+		session->one_mmap_addr = buf;
+		session->one_mmap_offset = file_offset;
+	}
 
 more:
 	event = fetch_mmaped_event(session, head, mmap_size, buf);
@@ -1337,7 +1343,8 @@
 	size = event->header.size;
 
 	if (size < sizeof(struct perf_event_header) ||
-	    perf_session__process_event(session, event, tool, file_pos) < 0) {
+	    (skip = perf_session__process_event(session, event, tool, file_pos))
+									< 0) {
 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
 		       file_offset + head, event->header.size,
 		       event->header.type);
@@ -1345,6 +1352,9 @@
 		goto out_err;
 	}
 
+	if (skip)
+		size += skip;
+
 	head += size;
 	file_pos += size;
 
@@ -1364,6 +1374,7 @@
 	ui_progress__finish();
 	perf_session__warn_about_errors(session, tool);
 	perf_session_free_sample_buffers(session);
+	session->one_mmap = false;
 	return err;
 }
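The int -> s64 widening of the two process-event helpers changes the reader
contract: a negative value is still an error, while a positive value reports
extra bytes consumed beyond the event header's size, which the mmap loop now
adds before advancing. A minimal sketch of that contract, using the names from
the hunks above:

	/* Sketch: reader-side handling of the new s64 return value.      */
	s64 skip = perf_session__process_event(session, event, tool, file_pos);

	if (skip < 0)
		return -EINVAL;	/* processing error, abort               */
	if (skip > 0)
		size += skip;	/* event consumed extra trailing bytes   */
	head += size;		/* advance past the event (plus extras)  */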
 
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3140f8a..0321013 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -36,6 +36,9 @@
 	struct trace_event	tevent;
 	struct events_stats	stats;
 	bool			repipe;
+	bool			one_mmap;
+	void			*one_mmap_addr;
+	u64			one_mmap_offset;
 	struct ordered_samples	ordered_samples;
 	struct perf_data_file	*file;
 };
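The three new fields cache the single-mmap case: when the whole data file fits
in one mmap window, the window's base address and file offset are recorded so
a file offset can be translated straight to memory. A sketch of that
translation under the stated assumption (the at_offset() helper name is
hypothetical, not perf API):

	/* Sketch: file offset -> memory when the file is fully mapped.   */
	static void *at_offset(struct perf_session *s, u64 file_offset)
	{
		if (!s->one_mmap)
			return NULL;	/* caller must read from the file  */
		return (char *)s->one_mmap_addr +
		       (file_offset - s->one_mmap_offset);
	}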
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 1ec57dd..14e5a03 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1215,7 +1215,7 @@
 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
 	len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
 
-	return scnprintf(hpp->buf, hpp->size, "%*s", len, hse->se->se_header);
+	return scnprintf(hpp->buf, hpp->size, "%-*s", len, hse->se->se_header);
 }
 
 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
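The header-formatting change is a single character: scnprintf() follows printf
semantics, where "%*s" right-aligns within the column width and "%-*s"
left-aligns, so the header now lines up with left-aligned column values. A
standalone illustration of the two forms:

	#include <stdio.h>

	int main(void)
	{
		printf("[%*s]\n",  8, "pid");	/* "[     pid]" right-aligned */
		printf("[%-*s]\n", 8, "pid");	/* "[pid     ]" left-aligned  */
		return 0;
	}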
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 6a0a13d..283d3e7 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -30,6 +30,7 @@
 
 #define SLOT_MULT 30.0
 #define SLOT_HEIGHT 25.0
+#define SLOT_HALF (SLOT_HEIGHT / 2)
 
 int svg_page_width = 1000;
 u64 svg_highlight;
@@ -114,8 +115,14 @@
 	fprintf(svgfile, "      rect          { stroke-width: 1; }\n");
 	fprintf(svgfile, "      rect.process  { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1;   stroke:rgb(  0,  0,  0); } \n");
 	fprintf(svgfile, "      rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+	fprintf(svgfile, "      rect.process3 { fill:rgb(180,180,180); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
 	fprintf(svgfile, "      rect.sample   { fill:rgb(  0,  0,255); fill-opacity:0.8; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
 	fprintf(svgfile, "      rect.sample_hi{ fill:rgb(255,128,  0); fill-opacity:0.8; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+	fprintf(svgfile, "      rect.error    { fill:rgb(255,  0,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+	fprintf(svgfile, "      rect.net      { fill:rgb(  0,128,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+	fprintf(svgfile, "      rect.disk     { fill:rgb(  0,  0,255); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+	fprintf(svgfile, "      rect.sync     { fill:rgb(128,128,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+	fprintf(svgfile, "      rect.poll     { fill:rgb(  0,128,128); fill-opacity:0.2; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
 	fprintf(svgfile, "      rect.blocked  { fill:rgb(255,  0,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
 	fprintf(svgfile, "      rect.waiting  { fill:rgb(224,214,  0); fill-opacity:0.8; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
 	fprintf(svgfile, "      rect.WAITING  { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
@@ -132,12 +139,81 @@
 	fprintf(svgfile, "    ]]>\n   </style>\n</defs>\n");
 }
 
+static double normalize_height(double height)
+{
+	if (height < 0.25)
+		return 0.25;
+	else if (height < 0.50)
+		return 0.50;
+	else if (height < 0.75)
+		return 0.75;
+	else
+		return 0.100;
+}
+
+void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
+{
+	double w = time2pixels(end) - time2pixels(start);
+	height = normalize_height(height);
+
+	if (!svgfile)
+		return;
+
+	fprintf(svgfile, "<g>\n");
+	fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
+	fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
+		time2pixels(start),
+		w,
+		Yslot * SLOT_MULT,
+		SLOT_HALF * height,
+		type);
+	fprintf(svgfile, "</g>\n");
+}
+
+void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
+{
+	double w = time2pixels(end) - time2pixels(start);
+	height = normalize_height(height);
+
+	if (!svgfile)
+		return;
+
+	fprintf(svgfile, "<g>\n");
+	fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
+	fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
+		time2pixels(start),
+		w,
+		Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HALF * height,
+		SLOT_HALF * height,
+		type);
+	fprintf(svgfile, "</g>\n");
+}
+
+void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
+{
+	double w = time2pixels(end) - time2pixels(start);
+	height = normalize_height(height);
+
+	if (!svgfile)
+		return;
+
+	fprintf(svgfile, "<g>\n");
+	fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
+	fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
+		time2pixels(start),
+		w,
+		Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HEIGHT * height,
+		SLOT_HEIGHT * height,
+		type);
+	fprintf(svgfile, "</g>\n");
+}
+
 void svg_box(int Yslot, u64 start, u64 end, const char *type)
 {
 	if (!svgfile)
 		return;
 
-	fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+	fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
 		time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
 }
 
@@ -174,7 +250,7 @@
 		cpu, time_to_string(end - start));
 	if (backtrace)
 		fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
-	fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+	fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
 		time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT,
 		type);
 
@@ -186,7 +262,7 @@
 	text_size = round_text_size(text_size);
 
 	if (text_size > MIN_TEXT_SIZE)
-		fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
+		fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"%.8fpt\">%i</text>\n",
 			time2pixels(start), Yslot *  SLOT_MULT + SLOT_HEIGHT - 1, text_size,  cpu + 1);
 
 	fprintf(svgfile, "</g>\n");
@@ -202,10 +278,10 @@
 		return text;
 
 	if (duration < 1000 * 1000) { /* less than 1 msec */
-		sprintf(text, "%4.1f us", duration / 1000.0);
+		sprintf(text, "%.1f us", duration / 1000.0);
 		return text;
 	}
-	sprintf(text, "%4.1f ms", duration / 1000.0 / 1000);
+	sprintf(text, "%.1f ms", duration / 1000.0 / 1000);
 
 	return text;
 }
@@ -233,14 +309,14 @@
 
 	font_size = round_text_size(font_size);
 
-	fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
+	fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
 	fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
 	if (backtrace)
 		fprintf(svgfile, "<desc>Waiting on:\n%s</desc>\n", backtrace);
-	fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+	fprintf(svgfile, "<rect x=\"0\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
 		time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
 	if (font_size > MIN_TEXT_SIZE)
-		fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%1.8fpt\"> %s</text>\n",
+		fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%.8fpt\"> %s</text>\n",
 			font_size, text);
 	fprintf(svgfile, "</g>\n");
 }
@@ -289,16 +365,16 @@
 
 	fprintf(svgfile, "<g>\n");
 
-	fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
+	fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"cpu\"/>\n",
 		time2pixels(first_time),
 		time2pixels(last_time)-time2pixels(first_time),
 		cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
 
 	sprintf(cpu_string, "CPU %i", (int)cpu);
-	fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+	fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\">%s</text>\n",
 		10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
 
-	fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
+	fprintf(svgfile, "<text transform=\"translate(%.8f,%.8f)\" font-size=\"1.25pt\">%s</text>\n",
 		10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
 
 	fprintf(svgfile, "</g>\n");
@@ -319,11 +395,11 @@
 	else
 		type = "sample";
 
-	fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
+	fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), cpu2y(cpu));
 	fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start));
 	if (backtrace)
 		fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
-	fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+	fprintf(svgfile, "<rect x=\"0\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
 		time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
 	width = time2pixels(end)-time2pixels(start);
 	if (width > 6)
@@ -332,7 +408,7 @@
 	width = round_text_size(width);
 
 	if (width > MIN_TEXT_SIZE)
-		fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%3.8fpt\">%s</text>\n",
+		fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%.8fpt\">%s</text>\n",
 			width, name);
 
 	fprintf(svgfile, "</g>\n");
@@ -353,7 +429,7 @@
 		type = 6;
 	sprintf(style, "c%i", type);
 
-	fprintf(svgfile, "<rect class=\"%s\" x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\"/>\n",
+	fprintf(svgfile, "<rect class=\"%s\" x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\"/>\n",
 		style,
 		time2pixels(start), time2pixels(end)-time2pixels(start),
 		cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
@@ -365,7 +441,7 @@
 	width = round_text_size(width);
 
 	if (width > MIN_TEXT_SIZE)
-		fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
+		fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"%.8fpt\">C%i</text>\n",
 			time2pixels(start), cpu2y(cpu)+width, width, type);
 
 	fprintf(svgfile, "</g>\n");
@@ -407,9 +483,9 @@
 	if (max_freq)
 		height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
 	height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
-	fprintf(svgfile, "<line x1=\"%4.8f\" x2=\"%4.8f\" y1=\"%4.1f\" y2=\"%4.1f\" class=\"pstate\"/>\n",
+	fprintf(svgfile, "<line x1=\"%.8f\" x2=\"%.8f\" y1=\"%.1f\" y2=\"%.1f\" class=\"pstate\"/>\n",
 		time2pixels(start), time2pixels(end), height, height);
-	fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
+	fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"0.25pt\">%s</text>\n",
 		time2pixels(start), height+0.9, HzToHuman(freq));
 
 	fprintf(svgfile, "</g>\n");
@@ -435,32 +511,32 @@
 
 	if (row1 < row2) {
 		if (row1) {
-			fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+			fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
 				time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
 			if (desc2)
-				fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+				fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
 					time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2);
 		}
 		if (row2) {
-			fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+			fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
 				time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32,  time2pixels(start), row2 * SLOT_MULT);
 			if (desc1)
-				fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+				fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
 					time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1);
 		}
 	} else {
 		if (row2) {
-			fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+			fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
 				time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
 			if (desc1)
-				fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+				fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
 					time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1);
 		}
 		if (row1) {
-			fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+			fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
 				time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32,  time2pixels(start), row1 * SLOT_MULT);
 			if (desc2)
-				fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+				fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
 					time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2);
 		}
 	}
@@ -468,7 +544,7 @@
 	if (row2 > row1)
 		height += SLOT_HEIGHT;
 	if (row1)
-		fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
+		fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
 			time2pixels(start), height);
 
 	fprintf(svgfile, "</g>\n");
@@ -488,16 +564,16 @@
 		fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
 
 	if (row1 < row2)
-		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+		fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
 			time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row2 * SLOT_MULT);
 	else
-		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+		fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
 			time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row1 * SLOT_MULT);
 
 	height = row1 * SLOT_MULT;
 	if (row2 > row1)
 		height += SLOT_HEIGHT;
-	fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
+	fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
 			time2pixels(start), height);
 
 	fprintf(svgfile, "</g>\n");
@@ -515,9 +591,9 @@
 	if (backtrace)
 		fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
 
-	fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
+	fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
 			time2pixels(start), row * SLOT_MULT);
-	fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
+	fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
 			time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
 
 	fprintf(svgfile, "</g>\n");
@@ -528,7 +604,7 @@
 	if (!svgfile)
 		return;
 
-	fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+	fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\">%s</text>\n",
 		time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text);
 }
 
@@ -537,12 +613,26 @@
 	double boxsize;
 	boxsize = SLOT_HEIGHT / 2;
 
-	fprintf(svgfile, "<rect x=\"%i\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+	fprintf(svgfile, "<rect x=\"%i\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
 		X, boxsize, boxsize, style);
-	fprintf(svgfile, "<text transform=\"translate(%4.8f, %4.8f)\" font-size=\"%4.8fpt\">%s</text>\n",
+	fprintf(svgfile, "<text transform=\"translate(%.8f, %.8f)\" font-size=\"%.8fpt\">%s</text>\n",
 		X + boxsize + 5, boxsize, 0.8 * boxsize, text);
 }
 
+void svg_io_legenda(void)
+{
+	if (!svgfile)
+		return;
+
+	fprintf(svgfile, "<g>\n");
+	svg_legenda_box(0,	"Disk", "disk");
+	svg_legenda_box(100,	"Network", "net");
+	svg_legenda_box(200,	"Sync", "sync");
+	svg_legenda_box(300,	"Poll", "poll");
+	svg_legenda_box(400,	"Error", "error");
+	fprintf(svgfile, "</g>\n");
+}
+
 void svg_legenda(void)
 {
 	if (!svgfile)
@@ -559,7 +649,7 @@
 	fprintf(svgfile, "</g>\n");
 }
 
-void svg_time_grid(void)
+void svg_time_grid(double min_thickness)
 {
 	u64 i;
 
@@ -579,8 +669,10 @@
 			color = 128;
 		}
 
-		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
-			time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
+		if (thickness >= min_thickness)
+			fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%.3f\"/>\n",
+				time2pixels(i), SLOT_MULT/2, time2pixels(i),
+				total_height, color, color, color, thickness);
 
 		i += 10000000;
 	}
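The three new box helpers split a timechart slot vertically: svg_ubox()
anchors a height-scaled box to the top of the slot, svg_lbox() to the bottom,
and svg_fbox() spans the full slot; normalize_height() snaps the requested
fraction to quarter steps (0.25, 0.5, 0.75 or 1.0). A usage sketch with
hypothetical values:

	/* Sketch: one I/O event drawn three ways (values hypothetical).  */
	u64 start = 1000000, end = 2000000;	/* timestamps in ns        */

	svg_ubox(2, start, end, 0.6, "disk", 3 /* fd */, 0 /* err */, 1 /* merges */);
	svg_lbox(2, start, end, 0.6, "net",  4, 0, 0);
	svg_fbox(2, start, end, 0.6, "sync", 5, 0, 0);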
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
index e3aff53..9292a52 100644
--- a/tools/perf/util/svghelper.h
+++ b/tools/perf/util/svghelper.h
@@ -4,6 +4,9 @@
 #include <linux/types.h>
 
 extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
+extern void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
 extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
 extern void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
 extern void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
@@ -16,7 +19,8 @@
 extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
 
 
-extern void svg_time_grid(void);
+extern void svg_time_grid(double min_thickness);
+extern void svg_io_legenda(void);
 extern void svg_legenda(void);
 extern void svg_wakeline(u64 start, int row1, int row2, const char *backtrace);
 extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6864661..d753499 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -49,7 +49,8 @@
 
 static inline int elf_sym__is_function(const GElf_Sym *sym)
 {
-	return elf_sym__type(sym) == STT_FUNC &&
+	return (elf_sym__type(sym) == STT_FUNC ||
+		elf_sym__type(sym) == STT_GNU_IFUNC) &&
 	       sym->st_name != 0 &&
 	       sym->st_shndx != SHN_UNDEF;
 }
@@ -598,6 +599,8 @@
 			goto out_elf_end;
 	}
 
+	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
+
 	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
 			NULL);
 	if (ss->symshdr.sh_type != SHT_SYMTAB)
@@ -619,7 +622,7 @@
 		GElf_Shdr shdr;
 		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
 				ehdr.e_type == ET_REL ||
-				is_vdso_map(dso->short_name) ||
+				dso__is_vdso(dso) ||
 				elf_section_by_name(elf, &ehdr, &shdr,
 						     ".gnu.prelink_undo",
 						     NULL) != NULL);
@@ -698,6 +701,7 @@
 	bool remap_kernel = false, adjust_kernel_syms = false;
 
 	dso->symtab_type = syms_ss->type;
+	dso->is_64_bit = syms_ss->is_64_bit;
 	dso->rel = syms_ss->ehdr.e_type == ET_REL;
 
 	/*
@@ -1024,6 +1028,39 @@
 	return err;
 }
 
+enum dso_type dso__type_fd(int fd)
+{
+	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
+	GElf_Ehdr ehdr;
+	Elf_Kind ek;
+	Elf *elf;
+
+	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+	if (elf == NULL)
+		goto out;
+
+	ek = elf_kind(elf);
+	if (ek != ELF_K_ELF)
+		goto out_end;
+
+	if (gelf_getclass(elf) == ELFCLASS64) {
+		dso_type = DSO__TYPE_64BIT;
+		goto out_end;
+	}
+
+	if (gelf_getehdr(elf, &ehdr) == NULL)
+		goto out_end;
+
+	if (ehdr.e_machine == EM_X86_64)
+		dso_type = DSO__TYPE_X32BIT;
+	else
+		dso_type = DSO__TYPE_32BIT;
+out_end:
+	elf_end(elf);
+out:
+	return dso_type;
+}
+
 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
 {
 	ssize_t r;
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index bd15f49..c9541fe 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -288,6 +288,44 @@
 	return 0;
 }
 
+static int fd__is_64_bit(int fd)
+{
+	u8 e_ident[EI_NIDENT];
+
+	if (lseek(fd, 0, SEEK_SET))
+		return -1;
+
+	if (readn(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident))
+		return -1;
+
+	if (memcmp(e_ident, ELFMAG, SELFMAG) ||
+	    e_ident[EI_VERSION] != EV_CURRENT)
+		return -1;
+
+	return e_ident[EI_CLASS] == ELFCLASS64;
+}
+
+enum dso_type dso__type_fd(int fd)
+{
+	Elf64_Ehdr ehdr;
+	int ret;
+
+	ret = fd__is_64_bit(fd);
+	if (ret < 0)
+		return DSO__TYPE_UNKNOWN;
+
+	if (ret)
+		return DSO__TYPE_64BIT;
+
+	if (readn(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
+		return DSO__TYPE_UNKNOWN;
+
+	if (ehdr.e_machine == EM_X86_64)
+		return DSO__TYPE_X32BIT;
+
+	return DSO__TYPE_32BIT;
+}
+
 int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
 		  struct symsrc *ss,
 		  struct symsrc *runtime_ss __maybe_unused,
@@ -295,6 +333,11 @@
 		  int kmodule __maybe_unused)
 {
 	unsigned char *build_id[BUILD_ID_SIZE];
+	int ret;
+
+	ret = fd__is_64_bit(ss->fd);
+	if (ret >= 0)
+		dso->is_64_bit = ret;
 
 	if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) {
 		dso__set_build_id(dso, build_id);
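Both dso__type_fd() implementations above (the libelf one and the minimal one)
apply the same rule: an ELFCLASS64 object is a 64-bit DSO, and an ELFCLASS32
object whose machine field is EM_X86_64 is an x32 binary (the 32-bit ABI on
the 64-bit ISA). A self-contained sketch of the same classification using only
<elf.h> (classify() is illustrative, not perf API); it relies on e_ident,
e_type and e_machine sharing offsets between Elf32_Ehdr and Elf64_Ehdr:

	#include <elf.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static const char *classify(int fd)
	{
		Elf64_Ehdr ehdr;  /* fields used share offsets with Elf32_Ehdr */

		if (pread(fd, &ehdr, sizeof(ehdr), 0) != (ssize_t)sizeof(ehdr))
			return "unknown";
		if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
			return "unknown";		/* not an ELF file   */
		if (ehdr.e_ident[EI_CLASS] == ELFCLASS64)
			return "64-bit";
		if (ehdr.e_machine == EM_X86_64)
			return "x32";			/* 32-bit ABI on x86-64 */
		return "32-bit";
	}

	int main(int argc, char **argv)
	{
		int fd = argc > 1 ? open(argv[1], O_RDONLY) : -1;

		if (fd >= 0)
			printf("%s: %s\n", argv[1], classify(fd));
		return 0;
	}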
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7b9096f..eb06746 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -34,6 +34,7 @@
 	.annotate_src		= true,
 	.demangle		= true,
 	.cumulate_callchain	= true,
+	.show_hist_headers	= true,
 	.symfs			= "",
 };
 
@@ -341,6 +342,16 @@
 	return NULL;
 }
 
+static struct symbol *symbols__next(struct symbol *sym)
+{
+	struct rb_node *n = rb_next(&sym->rb_node);
+
+	if (n)
+		return rb_entry(n, struct symbol, rb_node);
+
+	return NULL;
+}
+
 struct symbol_name_rb_node {
 	struct rb_node	rb_node;
 	struct symbol	sym;
@@ -411,11 +422,16 @@
 	return symbols__find(&dso->symbols[type], addr);
 }
 
-static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
 {
 	return symbols__first(&dso->symbols[type]);
 }
 
+struct symbol *dso__next_symbol(struct symbol *sym)
+{
+	return symbols__next(sym);
+}
+
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 					const char *name)
 {
@@ -1064,6 +1080,7 @@
 			      &is_64_bit);
 	if (err)
 		goto out_err;
+	dso->is_64_bit = is_64_bit;
 
 	if (list_empty(&md.maps)) {
 		err = -EINVAL;
@@ -1662,6 +1679,7 @@
 	free(kallsyms_allocated_filename);
 
 	if (err > 0 && !dso__is_kcore(dso)) {
+		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
 		dso__set_long_name(dso, "[kernel.kallsyms]", false);
 		map__fixup_start(map);
 		map__fixup_end(map);
@@ -1709,6 +1727,7 @@
 	if (err > 0)
 		pr_debug("Using %s for symbols\n", kallsyms_filename);
 	if (err > 0 && !dso__is_kcore(dso)) {
+		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
 		machine__mmap_name(machine, path, sizeof(path));
 		dso__set_long_name(dso, strdup(path), true);
 		map__fixup_start(map);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 615c752..e7295e9 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -118,7 +118,8 @@
 			annotate_src,
 			event_group,
 			demangle,
-			filter_relative;
+			filter_relative,
+			show_hist_headers;
 	const char	*vmlinux_name,
 			*kallsyms_name,
 			*source_prefix,
@@ -215,6 +216,7 @@
 	GElf_Shdr dynshdr;
 
 	bool adjust_symbols;
+	bool is_64_bit;
 #endif
 };
 
@@ -238,6 +240,11 @@
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 					const char *name);
 
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
+struct symbol *dso__next_symbol(struct symbol *sym);
+
+enum dso_type dso__type_fd(int fd);
+
 int filename__read_build_id(const char *filename, void *bf, size_t size);
 int sysfs__read_build_id(const char *filename, void *bf, size_t size);
 int modules__parse(const char *filename, void *arg,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 2fde0d5..12c7a25 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -13,7 +13,7 @@
 	struct thread *leader;
 	pid_t pid = thread->pid_;
 
-	if (pid == thread->tid) {
+	if (pid == thread->tid || pid == -1) {
 		thread->mg = map_groups__new();
 	} else {
 		leader = machine__findnew_thread(machine, pid, pid);
@@ -34,6 +34,7 @@
 		thread->pid_ = pid;
 		thread->tid = tid;
 		thread->ppid = -1;
+		thread->cpu = -1;
 		INIT_LIST_HEAD(&thread->comm_list);
 
 		comm_str = malloc(32);
@@ -60,8 +61,10 @@
 {
 	struct comm *comm, *tmp;
 
-	map_groups__put(thread->mg);
-	thread->mg = NULL;
+	if (thread->mg) {
+		map_groups__put(thread->mg);
+		thread->mg = NULL;
+	}
 	list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) {
 		list_del(&comm->list);
 		comm__free(comm);
@@ -127,12 +130,12 @@
 size_t thread__fprintf(struct thread *thread, FILE *fp)
 {
 	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
-	       map_groups__fprintf(thread->mg, verbose, fp);
+	       map_groups__fprintf(thread->mg, fp);
 }
 
 void thread__insert_map(struct thread *thread, struct map *map)
 {
-	map_groups__fixup_overlappings(thread->mg, map, verbose, stderr);
+	map_groups__fixup_overlappings(thread->mg, map, stderr);
 	map_groups__insert(thread->mg, map);
 }
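Two related changes here: a pid_ of -1 now means "thread-group leader
unknown", so such a thread gets its own map_groups instead of chasing a bogus
leader, and thread__delete() tolerates the NULL mg a failed init can leave
behind. This pairs with the perf_session__findnew() change earlier, which now
passes -1 as the pid. A minimal call sketch (session and tid assumed in
scope):

	/* Sketch: looking up a thread when only the tid is known.        */
	struct thread *t = machine__findnew_thread(&session->machines.host,
						   -1 /* pid unknown */, tid);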
 
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 3c0c272..716b772 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -17,6 +17,7 @@
 	pid_t			pid_; /* Not all tools update this */
 	pid_t			tid;
 	pid_t			ppid;
+	int			cpu;
 	char			shortname[3];
 	bool			comm_set;
 	bool			dead; /* if set thread has exited */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 7e6fcfe..eb72716 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -40,6 +40,7 @@
 #include "trace-event.h"
 #include <api/fs/debugfs.h>
 #include "evsel.h"
+#include "debug.h"
 
 #define VERSION "0.5"
 
@@ -191,12 +192,10 @@
 		    strcmp(dent->d_name, "..") == 0 ||
 		    !name_in_tp_list(dent->d_name, tps))
 			continue;
-		format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
-		if (!format) {
+		if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
 			err = -ENOMEM;
 			goto out;
 		}
-		sprintf(format, "%s/%s/format", sys, dent->d_name);
 		ret = stat(format, &st);
 		free(format);
 		if (ret < 0)
@@ -217,12 +216,10 @@
 		    strcmp(dent->d_name, "..") == 0 ||
 		    !name_in_tp_list(dent->d_name, tps))
 			continue;
-		format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
-		if (!format) {
+		if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
 			err = -ENOMEM;
 			goto out;
 		}
-		sprintf(format, "%s/%s/format", sys, dent->d_name);
 		ret = stat(format, &st);
 
 		if (ret >= 0) {
@@ -317,12 +314,10 @@
 		    strcmp(dent->d_name, "ftrace") == 0 ||
 		    !system_in_tp_list(dent->d_name, tps))
 			continue;
-		sys = malloc(strlen(path) + strlen(dent->d_name) + 2);
-		if (!sys) {
+		if (asprintf(&sys, "%s/%s", path, dent->d_name) < 0) {
 			err = -ENOMEM;
 			goto out;
 		}
-		sprintf(sys, "%s/%s", path, dent->d_name);
 		ret = stat(sys, &st);
 		if (ret >= 0) {
 			ssize_t size = strlen(dent->d_name) + 1;
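The malloc()+sprintf() pairs above collapse into asprintf(), which computes
the size and allocates in one step, removing the hand-counted "+ 10"-style
buffer margins. asprintf() is a GNU extension (the perf build already enables
it); on failure it returns a negative value and the buffer is unusable. The
pattern, extracted:

	/* Sketch: asprintf() replaces manual sizing; caller still frees. */
	char *format;

	if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0)
		return -ENOMEM;		/* allocation failed               */
	/* ... use format (e.g. stat(format, &st)) ... */
	free(format);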
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index e113e18..54d9e9b 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -22,7 +22,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <getopt.h>
 #include <stdarg.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -36,6 +35,7 @@
 #include "../perf.h"
 #include "util.h"
 #include "trace-event.h"
+#include "debug.h"
 
 static int input_fd;
 
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
new file mode 100644
index 0000000..4d4210d
--- /dev/null
+++ b/tools/perf/util/tsc.c
@@ -0,0 +1,30 @@
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "tsc.h"
+
+u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
+{
+	u64 t, quot, rem;
+
+	t = ns - tc->time_zero;
+	quot = t / tc->time_mult;
+	rem  = t % tc->time_mult;
+	return (quot << tc->time_shift) +
+	       (rem << tc->time_shift) / tc->time_mult;
+}
+
+u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
+{
+	u64 quot, rem;
+
+	quot = cyc >> tc->time_shift;
+	rem  = cyc & ((1 << tc->time_shift) - 1);
+	return tc->time_zero + quot * tc->time_mult +
+	       ((rem * tc->time_mult) >> tc->time_shift);
+}
+
+u64 __weak rdtsc(void)
+{
+	return 0;
+}
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
new file mode 100644
index 0000000..a8b78f1
--- /dev/null
+++ b/tools/perf/util/tsc.h
@@ -0,0 +1,12 @@
+#ifndef __PERF_TSC_H
+#define __PERF_TSC_H
+
+#include <linux/types.h>
+
+#include "../arch/x86/util/tsc.h"
+
+u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
+u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
+u64 rdtsc(void);
+
+#endif
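The two conversions are exact inverses built from the kernel's clock
parameters, time = time_zero + (cyc * time_mult) >> time_shift, computed in
quotient/remainder halves so the 64-bit intermediate cannot overflow. The weak
rdtsc() stub returns 0 and is meant to be overridden by the per-arch (x86)
implementation. A hedged fragment showing intended use; pc is assumed to point
at a mmapped struct perf_event_mmap_page with cap_user_time_zero set:

	/* Sketch: fill the conversion from the event mmap page and
	 * round-trip a timestamp (pc is an assumption, see above).       */
	struct perf_tsc_conversion tc = {
		.time_shift = pc->time_shift,
		.time_mult  = pc->time_mult,
		.time_zero  = pc->time_zero,
	};
	u64 cyc  = rdtsc();			/* 0 unless arch overrides  */
	u64 ns   = tsc_to_perf_time(cyc, &tc);	/* cycles -> perf clock ns  */
	u64 back = perf_time_to_tsc(ns, &tc);	/* inverse; back ~= cyc     */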
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 5ec80a5..7419768 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -3,6 +3,7 @@
 #include <elfutils/libdwfl.h>
 #include <inttypes.h>
 #include <errno.h>
+#include "debug.h"
 #include "unwind.h"
 #include "unwind-libdw.h"
 #include "machine.h"
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 25578b9..92b56db 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -30,6 +30,7 @@
 #include "unwind.h"
 #include "symbol.h"
 #include "util.h"
+#include "debug.h"
 
 extern int
 UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 95aefa7..e52e746 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -1,5 +1,6 @@
 #include "../perf.h"
 #include "util.h"
+#include "debug.h"
 #include <api/fs/fs.h>
 #include <sys/mman.h>
 #ifdef HAVE_BACKTRACE_SUPPORT
@@ -333,12 +334,9 @@
 	if (!debugfs)
 		return NULL;
 
-	tracing = malloc(strlen(debugfs) + 9);
-	if (!tracing)
+	if (asprintf(&tracing, "%s/tracing", debugfs) < 0)
 		return NULL;
 
-	sprintf(tracing, "%s/tracing", debugfs);
-
 	tracing_found = 1;
 	return tracing;
 }
@@ -352,11 +350,9 @@
 	if (!tracing)
 		return NULL;
 
-	file = malloc(strlen(tracing) + strlen(name) + 2);
-	if (!file)
+	if (asprintf(&file, "%s/%s", tracing, name) < 0)
 		return NULL;
 
-	sprintf(file, "%s/%s", tracing, name);
 	return file;
 }
 
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 0ddb3b8..adca693 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,10 +11,34 @@
 #include "vdso.h"
 #include "util.h"
 #include "symbol.h"
+#include "machine.h"
 #include "linux/string.h"
+#include "debug.h"
 
-static bool vdso_found;
-static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX";
+#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
+
+struct vdso_file {
+	bool found;
+	bool error;
+	char temp_file_name[sizeof(VDSO__TEMP_FILE_NAME)];
+	const char *dso_name;
+};
+
+struct vdso_info {
+	struct vdso_file vdso;
+};
+
+static struct vdso_info *vdso_info__new(void)
+{
+	static const struct vdso_info vdso_info_init = {
+		.vdso    = {
+			.temp_file_name = VDSO__TEMP_FILE_NAME,
+			.dso_name = DSO__NAME_VDSO,
+		},
+	};
+
+	return memdup(&vdso_info_init, sizeof(vdso_info_init));
+}
 
 static int find_vdso_map(void **start, void **end)
 {
@@ -47,7 +71,7 @@
 	return !found;
 }
 
-static char *get_file(void)
+static char *get_file(struct vdso_file *vdso_file)
 {
 	char *vdso = NULL;
 	char *buf = NULL;
@@ -55,10 +79,10 @@
 	size_t size;
 	int fd;
 
-	if (vdso_found)
-		return vdso_file;
+	if (vdso_file->found)
+		return vdso_file->temp_file_name;
 
-	if (find_vdso_map(&start, &end))
+	if (vdso_file->error || find_vdso_map(&start, &end))
 		return NULL;
 
 	size = end - start;
@@ -67,45 +91,78 @@
 	if (!buf)
 		return NULL;
 
-	fd = mkstemp(vdso_file);
+	fd = mkstemp(vdso_file->temp_file_name);
 	if (fd < 0)
 		goto out;
 
 	if (size == (size_t) write(fd, buf, size))
-		vdso = vdso_file;
+		vdso = vdso_file->temp_file_name;
 
 	close(fd);
 
  out:
 	free(buf);
 
-	vdso_found = (vdso != NULL);
+	vdso_file->found = (vdso != NULL);
+	vdso_file->error = !vdso_file->found;
 	return vdso;
 }
 
-void vdso__exit(void)
+void vdso__exit(struct machine *machine)
 {
-	if (vdso_found)
-		unlink(vdso_file);
+	struct vdso_info *vdso_info = machine->vdso_info;
+
+	if (!vdso_info)
+		return;
+
+	if (vdso_info->vdso.found)
+		unlink(vdso_info->vdso.temp_file_name);
+
+	zfree(&machine->vdso_info);
 }
 
-struct dso *vdso__dso_findnew(struct list_head *head)
+static struct dso *vdso__new(struct machine *machine, const char *short_name,
+			     const char *long_name)
 {
-	struct dso *dso = dsos__find(head, VDSO__MAP_NAME, true);
+	struct dso *dso;
 
-	if (!dso) {
-		char *file;
-
-		file = get_file();
-		if (!file)
-			return NULL;
-
-		dso = dso__new(VDSO__MAP_NAME);
-		if (dso != NULL) {
-			dsos__add(head, dso);
-			dso__set_long_name(dso, file, false);
-		}
+	dso = dso__new(short_name);
+	if (dso != NULL) {
+		dsos__add(&machine->user_dsos, dso);
+		dso__set_long_name(dso, long_name, false);
 	}
 
 	return dso;
 }
+
+struct dso *vdso__dso_findnew(struct machine *machine,
+			      struct thread *thread __maybe_unused)
+{
+	struct vdso_info *vdso_info;
+	struct dso *dso;
+
+	if (!machine->vdso_info)
+		machine->vdso_info = vdso_info__new();
+
+	vdso_info = machine->vdso_info;
+	if (!vdso_info)
+		return NULL;
+
+	dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true);
+	if (!dso) {
+		char *file;
+
+		file = get_file(&vdso_info->vdso);
+		if (!file)
+			return NULL;
+
+		dso = vdso__new(machine, DSO__NAME_VDSO, file);
+	}
+
+	return dso;
+}
+
+bool dso__is_vdso(struct dso *dso)
+{
+	return !strcmp(dso->short_name, DSO__NAME_VDSO);
+}
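Moving the found/error flags and the temp-file name from file-scope statics
into a per-machine vdso_info lets each struct machine (host and guests) manage
its own extracted vdso copy. A sketch of the new lifecycle (call sites
assumed from context, not shown in this patch):

	/* Sketch: per-machine vdso lifecycle (call sites assumed).       */
	struct dso *dso = vdso__dso_findnew(machine, thread);
	/* First call allocates machine->vdso_info, copies the mapped
	 * [vdso] image to a /tmp/perf-vdso.so-XXXXXX temp file and
	 * registers it as a DSO named "[vdso]".                          */

	vdso__exit(machine);	/* teardown: unlink the temp file and
				 * zfree(&machine->vdso_info)             */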
diff --git a/tools/perf/util/vdso.h b/tools/perf/util/vdso.h
index 0f76e7c..af9d692 100644
--- a/tools/perf/util/vdso.h
+++ b/tools/perf/util/vdso.h
@@ -7,12 +7,21 @@
 
 #define VDSO__MAP_NAME "[vdso]"
 
+#define DSO__NAME_VDSO "[vdso]"
+
 static inline bool is_vdso_map(const char *filename)
 {
 	return !strcmp(filename, VDSO__MAP_NAME);
 }
 
-struct dso *vdso__dso_findnew(struct list_head *head);
-void vdso__exit(void);
+struct dso;
+
+bool dso__is_vdso(struct dso *dso);
+
+struct machine;
+struct thread;
+
+struct dso *vdso__dso_findnew(struct machine *machine, struct thread *thread);
+void vdso__exit(struct machine *machine);
 
 #endif /* __PERF_VDSO__ */
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 4063156..55ab700 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -72,7 +72,7 @@
     "IGNORE_UNUSED"		=> 0,
 );
 
-my $ktest_config;
+my $ktest_config = "ktest.conf";
 my $version;
 my $have_version = 0;
 my $machine;
@@ -149,7 +149,6 @@
 my $bisect_ret_default;
 my $in_patchcheck = 0;
 my $run_test;
-my $redirect;
 my $buildlog;
 my $testlog;
 my $dmesg;
@@ -522,7 +521,7 @@
     return read_prompt 1, $prompt;
 }
 
-sub get_ktest_config {
+sub get_mandatory_config {
     my ($config) = @_;
     my $ans;
 
@@ -553,29 +552,29 @@
     }
 }
 
-sub get_ktest_configs {
-    get_ktest_config("MACHINE");
-    get_ktest_config("BUILD_DIR");
-    get_ktest_config("OUTPUT_DIR");
+sub get_mandatory_configs {
+    get_mandatory_config("MACHINE");
+    get_mandatory_config("BUILD_DIR");
+    get_mandatory_config("OUTPUT_DIR");
 
     if ($newconfig) {
-	get_ktest_config("BUILD_OPTIONS");
+	get_mandatory_config("BUILD_OPTIONS");
     }
 
     # options required for other than just building a kernel
     if (!$buildonly) {
-	get_ktest_config("POWER_CYCLE");
-	get_ktest_config("CONSOLE");
+	get_mandatory_config("POWER_CYCLE");
+	get_mandatory_config("CONSOLE");
     }
 
     # options required for install and more
     if ($buildonly != 1) {
-	get_ktest_config("SSH_USER");
-	get_ktest_config("BUILD_TARGET");
-	get_ktest_config("TARGET_IMAGE");
+	get_mandatory_config("SSH_USER");
+	get_mandatory_config("BUILD_TARGET");
+	get_mandatory_config("TARGET_IMAGE");
     }
 
-    get_ktest_config("LOCALVERSION");
+    get_mandatory_config("LOCALVERSION");
 
     return if ($buildonly);
 
@@ -583,7 +582,7 @@
 
     if (!defined($rtype)) {
 	if (!defined($opt{"GRUB_MENU"})) {
-	    get_ktest_config("REBOOT_TYPE");
+	    get_mandatory_config("REBOOT_TYPE");
 	    $rtype = $entered_configs{"REBOOT_TYPE"};
 	} else {
 	    $rtype = "grub";
@@ -591,16 +590,16 @@
     }
 
     if ($rtype eq "grub") {
-	get_ktest_config("GRUB_MENU");
+	get_mandatory_config("GRUB_MENU");
     }
 
     if ($rtype eq "grub2") {
-	get_ktest_config("GRUB_MENU");
-	get_ktest_config("GRUB_FILE");
+	get_mandatory_config("GRUB_MENU");
+	get_mandatory_config("GRUB_FILE");
     }
 
     if ($rtype eq "syslinux") {
-	get_ktest_config("SYSLINUX_LABEL");
+	get_mandatory_config("SYSLINUX_LABEL");
     }
 }
 
@@ -1090,7 +1089,7 @@
     $test_case = __read_config $config, \$test_num;
 
     # make sure we have all mandatory configs
-    get_ktest_configs;
+    get_mandatory_configs;
 
     # was a test specified?
     if (!$test_case) {
@@ -1529,7 +1528,7 @@
 }
 
 sub run_command {
-    my ($command) = @_;
+    my ($command, $redirect) = @_;
     my $dolog = 0;
     my $dord = 0;
     my $pid;
@@ -2265,9 +2264,7 @@
     # Run old config regardless, to enforce min configurations
     make_oldconfig;
 
-    $redirect = "$buildlog";
-    my $build_ret = run_command "$make $build_options";
-    undef $redirect;
+    my $build_ret = run_command "$make $build_options", $buildlog;
 
     if (defined($post_build)) {
 	# Because a post build may change the kernel version
@@ -2360,9 +2357,7 @@
     $poweroff_on_error = 0;
     $die_on_failure = 1;
 
-    $redirect = "$testlog";
-    run_command $run_test or $failed = 1;
-    undef $redirect;
+    run_command $run_test, $testlog or $failed = 1;
 
     exit $failed;
 }
@@ -2789,12 +2784,17 @@
 sub assign_configs {
     my ($hash, $config) = @_;
 
+    doprint "Reading configs from $config\n";
+
     open (IN, $config)
 	or dodie "Failed to read $config";
 
     while (<IN>) {
+	chomp;
 	if (/^((CONFIG\S*)=.*)/) {
 	    ${$hash}{$2} = $1;
+	} elsif (/^(# (CONFIG\S*) is not set)/) {
+	    ${$hash}{$2} = $1;
 	}
     }
 
@@ -2807,27 +2807,6 @@
     assign_configs \%config_ignore, $config;
 }
 
-sub read_current_config {
-    my ($config_ref) = @_;
-
-    %{$config_ref} = ();
-    undef %{$config_ref};
-
-    my @key = keys %{$config_ref};
-    if ($#key >= 0) {
-	print "did not delete!\n";
-	exit;
-    }
-    open (IN, "$output_config");
-
-    while (<IN>) {
-	if (/^(CONFIG\S+)=(.*)/) {
-	    ${$config_ref}{$1} = $2;
-	}
-    }
-    close(IN);
-}
-
 sub get_dependencies {
     my ($config) = @_;
 
@@ -2846,78 +2825,111 @@
     return @deps;
 }
 
-sub create_config {
-    my @configs = @_;
+sub save_config {
+    my ($pc, $file) = @_;
 
-    open(OUT, ">$output_config") or dodie "Can not write to $output_config";
+    my %configs = %{$pc};
 
-    foreach my $config (@configs) {
-	print OUT "$config_set{$config}\n";
-	my @deps = get_dependencies $config;
-	foreach my $dep (@deps) {
-	    print OUT "$config_set{$dep}\n";
-	}
-    }
+    doprint "Saving configs into $file\n";
 
-    # turn off configs to keep off
-    foreach my $config (keys %config_off) {
-	print OUT "# $config is not set\n";
-    }
+    open(OUT, ">$file") or dodie "Can not write to $file";
 
-    # turn off configs that should be off for now
-    foreach my $config (@config_off_tmp) {
-	print OUT "# $config is not set\n";
-    }
-
-    foreach my $config (keys %config_ignore) {
-	print OUT "$config_ignore{$config}\n";
+    foreach my $config (keys %configs) {
+	print OUT "$configs{$config}\n";
     }
     close(OUT);
+}
+
+sub create_config {
+    my ($name, $pc) = @_;
+
+    doprint "Creating old config from $name configs\n";
+
+    save_config $pc, $output_config;
 
     make_oldconfig;
 }
 
+# Compare two config hashes and return the configs whose values differ.
+# The returned hash holds B's values; look a key up in A to see A's.
+sub diff_config_vals {
+    my ($pa, $pb) = @_;
+
+    # crappy Perl way to pass in hashes.
+    my %a = %{$pa};
+    my %b = %{$pb};
+
+    my %ret;
+
+    foreach my $item (keys %a) {
+	if (defined($b{$item}) && $b{$item} ne $a{$item}) {
+	    $ret{$item} = $b{$item};
+	}
+    }
+
+    return %ret;
+}
+
+# compare two config hashes and return the configs in B but not A
+sub diff_configs {
+    my ($pa, $pb) = @_;
+
+    my %ret;
+
+    # crappy Perl way to pass in hashes.
+    my %a = %{$pa};
+    my %b = %{$pb};
+
+    foreach my $item (keys %b) {
+	if (!defined($a{$item})) {
+	    $ret{$item} = $b{$item};
+	}
+    }
+
+    return %ret;
+}
+
+# Return whether two configs are equal or not:
+#   0 if a and b are equal
+#  +1 if b has something a does not, or a and b differ on an item
+#  -1 if a has something b does not (and nothing else differs)
 sub compare_configs {
-    my (%a, %b) = @_;
+    my ($pa, $pb) = @_;
+
+    my %ret;
+
+    # crappy Perl way to pass in hashes.
+    my %a = %{$pa};
+    my %b = %{$pb};
+
+    foreach my $item (keys %b) {
+	if (!defined($a{$item})) {
+	    return 1;
+	}
+	if ($a{$item} ne $b{$item}) {
+	    return 1;
+	}
+    }
 
     foreach my $item (keys %a) {
 	if (!defined($b{$item})) {
-	    print "diff $item\n";
-	    return 1;
+	    return -1;
 	}
-	delete $b{$item};
     }
 
-    my @keys = keys %b;
-    if ($#keys) {
-	print "diff2 $keys[0]\n";
-    }
-    return -1 if ($#keys >= 0);
-
     return 0;
 }
 
 sub run_config_bisect_test {
     my ($type) = @_;
 
-    return run_bisect_test $type, "oldconfig";
-}
+    my $ret = run_bisect_test $type, "oldconfig";
 
-sub process_passed {
-    my (%configs) = @_;
-
-    doprint "These configs had no failure: (Enabling them for further compiles)\n";
-    # Passed! All these configs are part of a good compile.
-    # Add them to the min options.
-    foreach my $config (keys %configs) {
-	if (defined($config_list{$config})) {
-	    doprint " removing $config\n";
-	    $config_ignore{$config} = $config_list{$config};
-	    delete $config_list{$config};
-	}
+    if ($bisect_manual) {
+	$ret = answer_bisect;
     }
-    doprint "config copied to $outputdir/config_good\n";
-    run_command "cp -f $output_config $outputdir/config_good";
+
+    return $ret;
 }
 
 sub process_failed {
@@ -2928,253 +2940,225 @@
     doprint "***************************************\n\n";
 }
 
-sub run_config_bisect {
+# used for config bisecting
+my $good_config;
+my $bad_config;
 
-    my @start_list = keys %config_list;
+sub process_new_config {
+    my ($tc, $nc, $gc, $bc) = @_;
 
-    if ($#start_list < 0) {
-	doprint "No more configs to test!!!\n";
-	return -1;
+    my %tmp_config = %{$tc};
+    my %good_configs = %{$gc};
+    my %bad_configs = %{$bc};
+
+    my %new_configs;
+
+    my $runtest = 1;
+    my $ret;
+
+    create_config "tmp_configs", \%tmp_config;
+    assign_configs \%new_configs, $output_config;
+
+    $ret = compare_configs \%new_configs, \%bad_configs;
+    if (!$ret) {
+	doprint "New config equals bad config, try next test\n";
+	$runtest = 0;
     }
 
-    doprint "***** RUN TEST ***\n";
+    if ($runtest) {
+	$ret = compare_configs \%new_configs, \%good_configs;
+	if (!$ret) {
+	    doprint "New config equals good config, try next test\n";
+	    $runtest = 0;
+	}
+    }
+
+    %{$nc} = %new_configs;
+
+    return $runtest;
+}
+
+sub run_config_bisect {
+    my ($pgood, $pbad) = @_;
+
     my $type = $config_bisect_type;
+
+    my %good_configs = %{$pgood};
+    my %bad_configs = %{$pbad};
+
+    my %diff_configs = diff_config_vals \%good_configs, \%bad_configs;
+    my %b_configs = diff_configs \%good_configs, \%bad_configs;
+    my %g_configs = diff_configs \%bad_configs, \%good_configs;
+
+    my @diff_arr = keys %diff_configs;
+    my $len_diff = $#diff_arr + 1;
+
+    my @b_arr = keys %b_configs;
+    my $len_b = $#b_arr + 1;
+
+    my @g_arr = keys %g_configs;
+    my $len_g = $#g_arr + 1;
+
+    my $runtest = 1;
+    my %new_configs;
     my $ret;
-    my %current_config;
 
-    my $count = $#start_list + 1;
-    doprint "  $count configs to test\n";
+    # First, let's get it down to a single subset.
+    # Is the problem with a difference in values?
+    # Is the problem with a missing config?
+    # Is the problem with a config that breaks things?
 
-    my $half = int($#start_list / 2);
+    # Enable all of one set and see if we get a new bad
+    # or good config.
 
-    do {
-	my @tophalf = @start_list[0 .. $half];
+    # first set the good config to the bad values.
 
-	# keep the bottom half off
-	if ($half < $#start_list) {
-	    @config_off_tmp = @start_list[$half + 1 .. $#start_list];
-	} else {
-	    @config_off_tmp = ();
-	}
+    doprint "d=$len_diff g=$len_g b=$len_b\n";
 
-	create_config @tophalf;
-	read_current_config \%current_config;
+    # First, let's enable things in bad config that are enabled in good config
 
-	$count = $#tophalf + 1;
-	doprint "Testing $count configs\n";
-	my $found = 0;
-	# make sure we test something
-	foreach my $config (@tophalf) {
-	    if (defined($current_config{$config})) {
-		logit " $config\n";
-		$found = 1;
+    if ($len_diff > 0) {
+	if ($len_b > 0 || $len_g > 0) {
+	    my %tmp_config = %bad_configs;
+
+	    doprint "Set tmp config to be bad config with good config values\n";
+	    foreach my $item (@diff_arr) {
+		$tmp_config{$item} = $good_configs{$item};
 	    }
+
+	    $runtest = process_new_config \%tmp_config, \%new_configs,
+			    \%good_configs, \%bad_configs;
 	}
-	if (!$found) {
-	    # try the other half
-	    doprint "Top half produced no set configs, trying bottom half\n";
+    }
 
-	    # keep the top half off
-	    @config_off_tmp = @tophalf;
-	    @tophalf = @start_list[$half + 1 .. $#start_list];
+    if (!$runtest && $len_diff > 0) {
 
-	    create_config @tophalf;
-	    read_current_config \%current_config;
-	    foreach my $config (@tophalf) {
-		if (defined($current_config{$config})) {
-		    logit " $config\n";
-		    $found = 1;
-		}
-	    }
-	    if (!$found) {
-		doprint "Failed: Can't make new config with current configs\n";
-		foreach my $config (@start_list) {
-		    doprint "  CONFIG: $config\n";
-		}
-		return -1;
-	    }
-	    $count = $#tophalf + 1;
-	    doprint "Testing $count configs\n";
-	}
-
-	$ret = run_config_bisect_test $type;
-	if ($bisect_manual) {
-	    $ret = answer_bisect;
-	}
-	if ($ret) {
-	    process_passed %current_config;
-	    return 0;
-	}
-
-	doprint "This config had a failure.\n";
-	doprint "Removing these configs that were not set in this config:\n";
-	doprint "config copied to $outputdir/config_bad\n";
-	run_command "cp -f $output_config $outputdir/config_bad";
-
-	# A config exists in this group that was bad.
-	foreach my $config (keys %config_list) {
-	    if (!defined($current_config{$config})) {
-		doprint " removing $config\n";
-		delete $config_list{$config};
-	    }
-	}
-
-	@start_list = @tophalf;
-
-	if ($#start_list == 0) {
-	    process_failed $start_list[0];
+	if ($len_diff == 1) {
+	    process_failed $diff_arr[0];
 	    return 1;
 	}
+	my %tmp_config = %bad_configs;
 
-	# remove half the configs we are looking at and see if
-	# they are good.
-	$half = int($#start_list / 2);
-    } while ($#start_list > 0);
+	my $half = int($#diff_arr / 2);
+	my @tophalf = @diff_arr[0 .. $half];
 
-    # we found a single config, try it again unless we are running manually
+	doprint "Settings bisect with top half:\n";
+	doprint "Set tmp config to be bad config with some good config values\n";
+	foreach my $item (@tophalf) {
+	    $tmp_config{$item} = $good_configs{$item};
+	}
 
-    if ($bisect_manual) {
-	process_failed $start_list[0];
-	return 1;
+	$runtest = process_new_config \%tmp_config, \%new_configs,
+			    \%good_configs, \%bad_configs;
+
+	if (!$runtest) {
+	    my %tmp_config = %bad_configs;
+
+	    doprint "Try bottom half\n";
+
+	    my @bottomhalf = @diff_arr[$half+1 .. $#diff_arr];
+
+	    foreach my $item (@bottomhalf) {
+		$tmp_config{$item} = $good_configs{$item};
+	    }
+
+	    $runtest = process_new_config \%tmp_config, \%new_configs,
+			    \%good_configs, \%bad_configs;
+	}
     }
 
-    my @tophalf = @start_list[0 .. 0];
-
-    $ret = run_config_bisect_test $type;
-    if ($ret) {
-	process_passed %current_config;
+    if ($runtest) {
+	$ret = run_config_bisect_test $type;
+	if ($ret) {
+	    doprint "NEW GOOD CONFIG\n";
+	    %good_configs = %new_configs;
+	    run_command "mv $good_config ${good_config}.last";
+	    save_config \%good_configs, $good_config;
+	    %{$pgood} = %good_configs;
+	} else {
+	    doprint "NEW BAD CONFIG\n";
+	    %bad_configs = %new_configs;
+	    run_command "mv $bad_config ${bad_config}.last";
+	    save_config \%bad_configs, $bad_config;
+	    %{$pbad} = %bad_configs;
+	}
 	return 0;
     }
 
-    process_failed $start_list[0];
-    return 1;
+    fail "Hmm, need to do a mix match?\n";
+    return -1;
 }
 
 sub config_bisect {
     my ($i) = @_;
 
-    my $start_config = $config_bisect;
-
-    my $tmpconfig = "$tmpdir/use_config";
-
-    if (defined($config_bisect_good)) {
-	process_config_ignore $config_bisect_good;
-    }
-
-    # Make the file with the bad config and the min config
-    if (defined($minconfig)) {
-	# read the min config for things to ignore
-	run_command "cp $minconfig $tmpconfig" or
-	    dodie "failed to copy $minconfig to $tmpconfig";
-    } else {
-	unlink $tmpconfig;
-    }
-
-    if (-f $tmpconfig) {
-	load_force_config($tmpconfig);
-	process_config_ignore $tmpconfig;
-    }
-
-    # now process the start config
-    run_command "cp $start_config $output_config" or
-	dodie "failed to copy $start_config to $output_config";
-
-    # read directly what we want to check
-    my %config_check;
-    open (IN, $output_config)
-	or dodie "failed to open $output_config";
-
-    while (<IN>) {
-	if (/^((CONFIG\S*)=.*)/) {
-	    $config_check{$2} = $1;
-	}
-    }
-    close(IN);
-
-    # Now run oldconfig with the minconfig
-    make_oldconfig;
-
-    # check to see what we lost (or gained)
-    open (IN, $output_config)
-	or dodie "Failed to read $start_config";
-
-    my %removed_configs;
-    my %added_configs;
-
-    while (<IN>) {
-	if (/^((CONFIG\S*)=.*)/) {
-	    # save off all options
-	    $config_set{$2} = $1;
-	    if (defined($config_check{$2})) {
-		if (defined($config_ignore{$2})) {
-		    $removed_configs{$2} = $1;
-		} else {
-		    $config_list{$2} = $1;
-		}
-	    } elsif (!defined($config_ignore{$2})) {
-		$added_configs{$2} = $1;
-		$config_list{$2} = $1;
-	    }
-	} elsif (/^# ((CONFIG\S*).*)/) {
-	    # Keep these configs disabled
-	    $config_set{$2} = $1;
-	    $config_off{$2} = $1;
-	}
-    }
-    close(IN);
-
-    my @confs = keys %removed_configs;
-    if ($#confs >= 0) {
-	doprint "Configs overridden by default configs and removed from check:\n";
-	foreach my $config (@confs) {
-	    doprint " $config\n";
-	}
-    }
-    @confs = keys %added_configs;
-    if ($#confs >= 0) {
-	doprint "Configs appearing in make oldconfig and added:\n";
-	foreach my $config (@confs) {
-	    doprint " $config\n";
-	}
-    }
-
-    my %config_test;
-    my $once = 0;
-
-    @config_off_tmp = ();
-
-    # Sometimes kconfig does weird things. We must make sure
-    # that the config we autocreate has everything we need
-    # to test, otherwise we may miss testing configs, or
-    # may not be able to create a new config.
-    # Here we create a config with everything set.
-    create_config (keys %config_list);
-    read_current_config \%config_test;
-    foreach my $config (keys %config_list) {
-	if (!defined($config_test{$config})) {
-	    if (!$once) {
-		$once = 1;
-		doprint "Configs not produced by kconfig (will not be checked):\n";
-	    }
-	    doprint "  $config\n";
-	    delete $config_list{$config};
-	}
-    }
+    my $type = $config_bisect_type;
     my $ret;
 
-    if (defined($config_bisect_check) && $config_bisect_check) {
-	doprint " Checking to make sure bad config with min config fails\n";
-	create_config keys %config_list;
-	$ret = run_config_bisect_test $config_bisect_type;
-	if ($ret) {
-	    doprint " FAILED! Bad config with min config boots fine\n";
-	    return -1;
+    $bad_config = $config_bisect;
+
+    if (defined($config_bisect_good)) {
+	$good_config = $config_bisect_good;
+    } elsif (defined($minconfig)) {
+	$good_config = $minconfig;
+    } else {
+	doprint "No config specified, checking if defconfig works";
+	$ret = run_bisect_test $type, "defconfig";
+	if (!$ret) {
+	    fail "Have no good config to compare with, please set CONFIG_BISECT_GOOD";
+	    return 1;
 	}
-	doprint " Bad config with min config fails as expected\n";
+	$good_config = $output_config;
+    }
+
+    # we don't want min configs to cause issues here.
+    doprint "Disabling 'MIN_CONFIG' for this test\n";
+    undef $minconfig;
+
+    my %good_configs;
+    my %bad_configs;
+    my %tmp_configs;
+
+    doprint "Run good configs through make oldconfig\n";
+    assign_configs \%tmp_configs, $good_config;
+    create_config "$good_config", \%tmp_configs;
+    assign_configs \%good_configs, $output_config;
+
+    doprint "Run bad configs through make oldconfig\n";
+    assign_configs \%tmp_configs, $bad_config;
+    create_config "$bad_config", \%tmp_configs;
+    assign_configs \%bad_configs, $output_config;
+
+    $good_config = "$tmpdir/good_config";
+    $bad_config = "$tmpdir/bad_config";
+
+    save_config \%good_configs, $good_config;
+    save_config \%bad_configs, $bad_config;
+
+
+    if (defined($config_bisect_check) && $config_bisect_check ne "0") {
+	if ($config_bisect_check ne "good") {
+	    doprint "Testing bad config\n";
+
+	    $ret = run_bisect_test $type, "useconfig:$bad_config";
+	    if ($ret) {
+		fail "Bad config succeeded when expected to fail!";
+		return 0;
+	    }
+	}
+	if ($config_bisect_check ne "bad") {
+	    doprint "Testing good config\n";
+
+	    $ret = run_bisect_test $type, "useconfig:$good_config";
+	    if (!$ret) {
+		fail "Good config failed when expected to succeed!";
+		return 0;
+	    }
+	}
     }
 
     do {
-	$ret = run_config_bisect;
+	$ret = run_config_bisect \%good_configs, \%bad_configs;
     } while (!$ret);
 
     return $ret if ($ret < 0);
@@ -3455,29 +3439,6 @@
     read_kconfig($kconfig);
 }
 
-sub read_config_list {
-    my ($config) = @_;
-
-    open (IN, $config)
-	or dodie "Failed to read $config";
-
-    while (<IN>) {
-	if (/^((CONFIG\S*)=.*)/) {
-	    if (!defined($config_ignore{$2})) {
-		$config_list{$2} = $1;
-	    }
-	}
-    }
-
-    close(IN);
-}
-
-sub read_output_config {
-    my ($config) = @_;
-
-    assign_configs \%config_ignore, $config;
-}
-
 sub make_new_config {
     my @configs = @_;
 
@@ -3863,7 +3824,7 @@
     success $i;
 }
 
-$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl config-file\n";
+$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl [config-file]\n";
 
 if ($#ARGV == 0) {
     $ktest_config = $ARGV[0];
@@ -3873,8 +3834,6 @@
 	    exit 0;
 	}
     }
-} else {
-    $ktest_config = "ktest.conf";
 }
 
 if (! -f $ktest_config) {
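For reference, the halving strategy that the rewritten run_config_bisect
applies can be modeled by the following minimal, self-contained Perl
sketch. This is an illustration only, under simplifying assumptions:
test_config() and the toy CONFIG_* values are hypothetical stand-ins, not
ktest.pl functions, and the sketch skips the kconfig dependency handling
and the bottom-half retry that process_new_config drives.

    #!/usr/bin/perl
    # Simplified model of a config bisect: repeatedly force the top half
    # of the differing options in the bad config to their good values,
    # then adopt the trial as the new good or new bad config based on
    # the test result.
    use strict;
    use warnings;

    my %good = (CONFIG_A => 'y', CONFIG_B => 'y', CONFIG_C => 'n', CONFIG_D => 'y');
    my %bad  = (CONFIG_A => 'n', CONFIG_B => 'y', CONFIG_C => 'y', CONFIG_D => 'n');

    # Hypothetical test: pretend CONFIG_C=y is what breaks the boot.
    sub test_config {
        my ($config) = @_;
        return $config->{CONFIG_C} eq 'n';    # true means "good"
    }

    while (1) {
        # Options on which the two configs still disagree (assumes both
        # configs list the same options).
        my @diff = sort grep { $good{$_} ne $bad{$_} } keys %good;
        last if !@diff;                       # nothing left to bisect
        if (@diff == 1) {
            print "config responsible: $diff[0]\n";
            last;
        }
        # Trial config: the bad config with the top half of the
        # differing options "set" to their good values.
        my @tophalf = @diff[0 .. int($#diff / 2)];
        my %tmp = %bad;
        $tmp{$_} = $good{$_} for @tophalf;

        if (test_config(\%tmp)) {
            %good = %tmp;    # trial passed: it is the new good config
        } else {
            %bad = %tmp;     # trial failed: it is the new bad config
        }
    }

Each pass halves the set of differing options, so a bisect over N
differences converges in roughly log2(N) trial builds (a few more when
the bottom half has to be tried as well).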
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 172eec4..911e45a 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -1098,49 +1098,35 @@
 #
 #  The way it works is this:
 #
-#   First it finds a config to work with. Since a different version, or
-#   MIN_CONFIG may cause different dependecies, it must run through this
-#   preparation.
+#   You can specify a good config with CONFIG_BISECT_GOOD, otherwise it
+#   will use the MIN_CONFIG, and if that's not specified, it will use
+#   the config that comes with "make defconfig".
 #
-#   Overwrites any config set in the bad config with a config set in
-#   either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
-#   are minimal and do not disable configs you want to test:
-#   (ie.  # CONFIG_FOO is not set).
+#   It runs both the good and bad configs through a make oldconfig to
+#   make sure that they are set up for the kernel that is checked out.
 #
-#   An oldconfig is run on the bad config and any new config that
-#   appears will be added to the configs to test.
+#   It then reads the configs that are set, as well as the ones that
+#   are not set, for both the good and bad configs, and compares them.
+#   It will set half of the good configs within the bad config (note,
+#   "set" means to make the bad config match the good config; a config
+#   that is off in the good config will be turned off in the bad
+#   config. That too is considered a "set").
 #
-#   Finally, it generates a config with the above result and runs it
-#   again through make oldconfig to produce a config that should be
-#   satisfied by kconfig.
+#   It tests this new config and, if it works, it becomes the new good
+#   config; otherwise it becomes the new bad config. It continues this
+#   process until there is only one config left, and it reports that
+#   config.
 #
-#   Then it starts the bisect.
+#   The "bad config" can also be a config that is needed to boot but was
+#   disabled because it depended on something that wasn't set.
 #
-#   The configs to test are cut in half. If all the configs in this
-#   half depend on a config in the other half, then the other half
-#   is tested instead. If no configs are enabled by either half, then
-#   this means a circular dependency exists and the test fails.
+#   During this process, it saves the current good and bad configs in
+#   ${TMP_DIR}/good_config and ${TMP_DIR}/bad_config respectively.
+#   If you stop the test, you can copy them to a new location to
+#   reuse them again.
 #
-#   A config is created with the test half, and the bisect test is run.
-#
-#   If the bisect succeeds, then all configs in the generated config
-#   are removed from the configs to test and added to the configs that
-#   will be enabled for all builds (they will be enabled, but not be part
-#   of the configs to examine).
-#
-#   If the bisect fails, then all test configs that were not enabled by
-#   the config file are removed from the test. These configs will not
-#   be enabled in future tests. Since current config failed, we consider
-#   this to be a subset of the config that we started with.
-#
-#   When we are down to one config, it is considered the bad config.
-#
-#   Note, the config chosen may not be the true bad config. Due to
-#   dependencies and selections of the kbuild system, mulitple
-#   configs may be needed to cause a failure. If you disable the
-#   config that was found and restart the test, if the test fails
-#   again, it is recommended to rerun the config_bisect with a new
-#   bad config without the found config enabled.
+#   Although the MIN_CONFIG may be used as the config it starts with,
+#   the MIN_CONFIG itself is ignored during the config bisect.
 #
 #  The option BUILD_TYPE will be ignored.
 #
@@ -1160,13 +1146,16 @@
 # CONFIG_BISECT_GOOD (optional)
 #  If you have a good config to start with, then you
 #  can specify it with CONFIG_BISECT_GOOD. Otherwise
-#  the MIN_CONFIG is the base.
+#  the MIN_CONFIG is the base. If MIN_CONFIG is not set,
+#  it will build a config with "make defconfig".
 #
 # CONFIG_BISECT_CHECK (optional)
 #  Set this to 1 if you want to confirm that the config ktest
 #  generates (the bad config with the min config) is still bad.
 #  It may be that the min config fixes what broke the bad config
 #  and the test will not return a result.
+#  Set it to "good" to test only the good config and set it
+#  to "bad" to only test the bad config.
 #
 # Example:
 #   TEST_START
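Note that the "set" semantics described above include disabled options:
copying "# CONFIG_FOO is not set" from the good config into the bad
config counts as a "set" just as much as copying "CONFIG_FOO=y". Below
is a minimal Perl sketch of that parsing, mirroring the CONFIG regex the
ktest.pl changes above use; parse_config_line() is a hypothetical helper,
not a ktest.pl function:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Parse a .config-style line into (option, full line). A disabled
    # option is still a value that can be copied between configs.
    sub parse_config_line {
        my ($line) = @_;
        return ($2, $1) if $line =~ /^((CONFIG\S*)=.*)/;
        return ($2, $1) if $line =~ /^(# (CONFIG\S*) is not set)/;
        return;
    }

    my ($opt, $val);

    ($opt, $val) = parse_config_line("CONFIG_SMP=y");
    print "$opt => $val\n";    # CONFIG_SMP => CONFIG_SMP=y

    ($opt, $val) = parse_config_line("# CONFIG_DEBUG_INFO is not set");
    print "$opt => $val\n";    # a disabled option is copied the same way

This is why a good config that merely disables the breaking option is
enough to drive the bisect: turning that option off in the bad config is
a legitimate bisect step.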
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index ee1f6ca..3f6c9b7 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -54,10 +54,16 @@
 			if test -f "$i/qemu-cmd"
 			then
 				print_bug qemu failed
+				echo "   $i"
+			elif test -f "$i/buildonly"
+			then
+				echo Build-only run, no boot/test
+				configcheck.sh $i/.config $i/ConfigFragment
+				parse-build.sh $i/Make.out $configfile
 			else
 				print_bug Build failed
+				echo "   $i"
 			fi
-			echo "   $i"
 		fi
 	done
 done
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 27e544e..0f69dcb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -42,6 +42,7 @@
 
 T=/tmp/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
+touch $T
 
 . $KVM/bin/functions.sh
 . $KVPATH/ver_functions.sh
@@ -131,7 +132,10 @@
 
 cd $KVM
 kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
-echo ' ---' `date`: Starting kernel
+if test -z "$TORTURE_BUILDONLY"
+then
+	echo ' ---' `date`: Starting kernel
+fi
 
 # Generate -smp qemu argument.
 qemu_args="-nographic $qemu_args"
@@ -157,12 +161,13 @@
 # Generate kernel-version-specific boot parameters
 boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
 
-echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 if test -n "$TORTURE_BUILDONLY"
 then
 	echo Build-only run specified, boot/test omitted.
+	touch $resdir/buildonly
 	exit 0
 fi
+echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
 commandcompleted=0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 40285c5..589e9c3 100644
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -340,12 +340,18 @@
 	for (j = 1; j < jn; j++) {
 		builddir=KVM "/b" j
 		print "rm -f " builddir ".ready"
-		print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
-		print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
+		print "if test -z \"$TORTURE_BUILDONLY\""
+		print "then"
+		print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
+		print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
+		print "fi"
 	}
 	print "wait"
-	print "echo ---- All kernel runs complete. `date`";
-	print "echo ---- All kernel runs complete. `date` >> " rd "/log";
+	print "if test -z \"$TORTURE_BUILDONLY\""
+	print "then"
+	print "\techo ---- All kernel runs complete. `date`";
+	print "\techo ---- All kernel runs complete. `date` >> " rd "/log";
+	print "fi"
 	for (j = 1; j < jn; j++) {
 		builddir=KVM "/b" j
 		print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
@@ -385,10 +391,7 @@
 echo
 echo " --- `date` Test summary:"
 echo Results directory: $resdir/$ds
-if test -z "$TORTURE_BUILDONLY"
-then
-	kvm-recheck.sh $resdir/$ds
-fi
+kvm-recheck.sh $resdir/$ds
 ___EOF___
 
 if test "$dryrun" = script
@@ -403,7 +406,7 @@
 		sed -e 's/:.*$//' -e 's/^echo //'
 	exit 0
 else
-	# Not a dryru, so run the script.
+	# Not a dryrun, so run the script.
 	sh $T/script
 fi
 
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 9c827ec..063b707 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -15,7 +15,6 @@
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ZERO=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
index 1a777b5..ea119ba 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
@@ -18,7 +18,6 @@
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
index 61c8d9c..19cf948 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
@@ -18,7 +18,6 @@
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
index c1f111c..f4567fb 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
@@ -14,7 +14,6 @@
 CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 7dbd27c..0a262fb 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -18,7 +18,6 @@
 CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=y
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
index d0f32e5..3a06b97 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
@@ -18,7 +18,6 @@
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_PROVE_RCU=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
index 2e477df..8f084cc 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
@@ -19,7 +19,6 @@
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_PROVE_RCU=y
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
index 042f86e..ab62255 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -17,7 +17,6 @@
 CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=y
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
index 3438cee..69a2e25 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
@@ -18,7 +18,6 @@
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
index bf4523d..a0f32fb 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
@@ -18,7 +18,6 @@
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
index 81e4f7c..b7a62a5 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE09
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
@@ -13,7 +13,6 @@
 CONFIG_HIBERNATION=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
index ef624ce..a55c008 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
index ef624ce..a55c008 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
index ef624ce..a55c008 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
index ef624ce..a55c008 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
index adbb76c..3e588db 100644
--- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
@@ -14,7 +14,6 @@
 CONFIG_PREEMPT -- Do half.  (First three and #8.)
 CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not.
 CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING.
-CONFIG_PROVE_RCU_DELAY -- Do one.
 CONFIG_RCU_BOOST -- one of TREE_PREEMPT_RCU.
 CONFIG_RCU_BOOST_PRIO -- set to 2 for _BOOST testing.
 CONFIG_RCU_CPU_STALL_INFO -- do one with and without _VERBOSE.
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9be..476d3bf 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1526,17 +1526,33 @@
 		goto out_unmap;
 	}
 
-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-		 vctrl_res.start, vgic_maint_irq);
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
 	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
 		kvm_err("Cannot obtain VCPU resource\n");
 		ret = -ENXIO;
 		goto out_unmap;
 	}
+
+	if (!PAGE_ALIGNED(vcpu_res.start)) {
+		kvm_err("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)vcpu_res.start);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&vcpu_res),
+			PAGE_SIZE);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
 	vgic_vcpu_base = vcpu_res.start;
 
+	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+		 vctrl_res.start, vgic_maint_irq);
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
 	goto out;
 
 out_unmap:
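The new GICV checks above reject a base address or size that is not page
aligned, since the region is mapped into guests at page granularity.
PAGE_ALIGNED(x) boils down to testing the low bits of x against the page
mask; a small illustrative Perl sketch (assuming 4 KiB pages;
page_aligned() here is a stand-in, not the kernel macro):

    #!/usr/bin/perl
    use strict;
    use warnings;

    my $PAGE_SIZE = 4096;    # assumption: 4 KiB pages

    # An address (or size) is page aligned iff the bits below
    # PAGE_SIZE are all zero.
    sub page_aligned {
        my ($x) = @_;
        return ($x & ($PAGE_SIZE - 1)) == 0;
    }

    printf "0x%x -> %s\n", 0x2c06f000,
        page_aligned(0x2c06f000) ? "accepted" : "rejected";    # accepted
    printf "0x%x -> %s\n", 0x2c06f100,
        page_aligned(0x2c06f100) ? "accepted" : "rejected";    # rejected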
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 2458a1d..e8ce34c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -254,10 +254,9 @@
 	spin_lock(&ioapic->lock);
 	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
 		e = &ioapic->redirtbl[index];
-		if (!e->fields.mask &&
-			(e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
-			 kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
-				 index) || index == RTC_GSI)) {
+		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
+		    index == RTC_GSI) {
 			if (kvm_apic_match_dest(vcpu, NULL, 0,
 				e->fields.dest_id, e->fields.dest_mode)) {
 				__set_bit(e->fields.vector,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index ced4a54..a228ee8 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -323,13 +323,13 @@
 
 #define IOAPIC_ROUTING_ENTRY(irq) \
 	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
-	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
+	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
 #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
 
 #ifdef CONFIG_X86
 #  define PIC_ROUTING_ENTRY(irq) \
 	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
-	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
+	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
 #  define ROUTING_ENTRY2(irq) \
 	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
 #else