Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
The following patchset contains Netfilter fixes for your net tree,
mostly targeted to ipset, they are:

* Fix ICMPv6 NAT due to wrong comparison, code instead of type, from
  Phil Oester.

* Fix RCU race in conntrack extensions release path, from Michal Kubecek.

* Fix missing inversion in the userspace ipset test command match if
  the nomatch option is specified, from Jozsef Kadlecsik.

* Skip layer 4 protocol matching in ipset in case of IPv6 fragments,
  also from Jozsef Kadlecsik.

* Fix sequence adjustment in nfnetlink_queue due to using the netlink
  skb instead of the network skb, from Gao feng.

* Make sure we cannot swap sets with different layer 3 families in
  ipset, from Jozsef Kadlecsik.

* Fix possible bogus matching in ipset if hash sets with net elements
  are used, from Oliver Smith.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/aoe/udev.txt b/Documentation/aoe/udev.txt
index 8686e78..1f06daf 100644
--- a/Documentation/aoe/udev.txt
+++ b/Documentation/aoe/udev.txt
@@ -23,4 +23,4 @@
 SUBSYSTEM=="aoe", KERNEL=="flush",	NAME="etherd/%k", GROUP="disk", MODE="0220"
 
 # aoe block devices     
-KERNEL=="etherd*",       NAME="%k", GROUP="disk"
+KERNEL=="etherd*",       GROUP="disk"
diff --git a/Documentation/block/cmdline-partition.txt b/Documentation/block/cmdline-partition.txt
new file mode 100644
index 0000000..2bbf4cc
--- /dev/null
+++ b/Documentation/block/cmdline-partition.txt
@@ -0,0 +1,39 @@
+Embedded device command line partition
+======================================
+
+Read the block device partition table from the kernel command line.
+This is intended for embedded devices that use a fixed block device
+(eMMC).  There is no MBR, which saves storage space, and the bootloader
+can access data on the block device by absolute address.
+Users can easily change the partitioning.
+
+The format for the command line is just like mtdparts:
+
+blkdevparts=<blkdev-def>[;<blkdev-def>]
+  <blkdev-def> := <blkdev-id>:<partdef>[,<partdef>]
+    <partdef> := <size>[@<offset>](part-name)
+
+<blkdev-id>
+    block device disk name.  Embedded devices use fixed block devices,
+    so the disk name is also fixed, such as: mmcblk0, mmcblk1, mmcblk0boot0.
+
+<size>
+    partition size, in bytes, such as: 512, 1m, 1G.
+
+<offset>
+    partition start address, in bytes.
+
+(part-name)
+    partition name.  The kernel sends a uevent with "PARTNAME", which an
+    application can use to create a link to the block device partition.
+    User space applications can then access the partition by name.
+
+Example:
+    The eMMC disk names are "mmcblk0" and "mmcblk0boot0".
+
+  bootargs:
+    'blkdevparts=mmcblk0:1G(data0),1G(data1),-;mmcblk0boot0:1m(boot),-(kernel)'
+
+  dmesg:
+    mmcblk0: p1(data0) p2(data1) p3()
+    mmcblk0boot0: p1(boot) p2(kernel)
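+
+  The created partitions can then be verified from user space, e.g.:
+
+    cat /proc/partitions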
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 6f68ba0..3aeb5c4 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -70,6 +70,10 @@
 						unsigned long parent_rate);
 		long		(*round_rate)(struct clk_hw *hw, unsigned long,
 						unsigned long *);
+		long		(*determine_rate)(struct clk_hw *hw,
+						unsigned long rate,
+						unsigned long *best_parent_rate,
+						struct clk **best_parent_clk);
 		int		(*set_parent)(struct clk_hw *hw, u8 index);
 		u8		(*get_parent)(struct clk_hw *hw);
 		int		(*set_rate)(struct clk_hw *hw, unsigned long);
@@ -179,26 +183,28 @@
 callback is invalid or otherwise unnecessary.  Empty cells are either
 optional or must be evaluated on a case-by-case basis.
 
-                           clock hardware characteristics
-	     -----------------------------------------------------------
-             | gate | change rate | single parent | multiplexer | root |
-             |------|-------------|---------------|-------------|------|
-.prepare     |      |             |               |             |      |
-.unprepare   |      |             |               |             |      |
-             |      |             |               |             |      |
-.enable      | y    |             |               |             |      |
-.disable     | y    |             |               |             |      |
-.is_enabled  | y    |             |               |             |      |
-             |      |             |               |             |      |
-.recalc_rate |      | y           |               |             |      |
-.round_rate  |      | y           |               |             |      |
-.set_rate    |      | y           |               |             |      |
-             |      |             |               |             |      |
-.set_parent  |      |             | n             | y           | n    |
-.get_parent  |      |             | n             | y           | n    |
-             |      |             |               |             |      |
-.init        |      |             |               |             |      |
-	     -----------------------------------------------------------
+                              clock hardware characteristics
+                -----------------------------------------------------------
+                | gate | change rate | single parent | multiplexer | root |
+                |------|-------------|---------------|-------------|------|
+.prepare        |      |             |               |             |      |
+.unprepare      |      |             |               |             |      |
+                |      |             |               |             |      |
+.enable         | y    |             |               |             |      |
+.disable        | y    |             |               |             |      |
+.is_enabled     | y    |             |               |             |      |
+                |      |             |               |             |      |
+.recalc_rate    |      | y           |               |             |      |
+.round_rate     |      | y [1]       |               |             |      |
+.determine_rate |      | y [1]       |               |             |      |
+.set_rate       |      | y           |               |             |      |
+                |      |             |               |             |      |
+.set_parent     |      |             | n             | y           | n    |
+.get_parent     |      |             | n             | y           | n    |
+                |      |             |               |             |      |
+.init           |      |             |               |             |      |
+                -----------------------------------------------------------
+[1] Either one of round_rate or determine_rate is required.
 
 Finally, register your clock at run-time with a hardware-specific
 registration function.  This function simply populates struct clk_foo's
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e8cdf72..33d45ee 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -50,14 +50,16 @@
    which are dirty, and extra hints for use by the policy object.
    This information could be put on the cache device, but having it
    separate allows the volume manager to configure it differently,
-   e.g. as a mirror for extra robustness.
+   e.g. as a mirror for extra robustness.  This metadata device may only
+   be used by a single cache device.
 
 Fixed block size
 ----------------
 
 The origin is divided up into blocks of a fixed size.  This block size
 is configurable when you first create the cache.  Typically we've been
-using block sizes of 256k - 1024k.
+using block sizes of 256KB - 1024KB.  The block size must be between 64
+(32KB) and 2097152 (1GB) and a multiple of 64 (32KB).
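+
+As a sketch (device names are placeholders; the full cache table syntax
+is described later in this document), a cache with 512-sector (256KB)
+blocks could be created with:
+
+  dmsetup create cached --table "0 41943040 cache /dev/mapper/metadata \
+	/dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0"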
 
 Having a fixed block size simplifies the target a lot.  But it is
 something of a compromise.  For instance, a small part of a block may be
diff --git a/Documentation/device-mapper/statistics.txt b/Documentation/device-mapper/statistics.txt
new file mode 100644
index 0000000..2a1673a
--- /dev/null
+++ b/Documentation/device-mapper/statistics.txt
@@ -0,0 +1,186 @@
+DM statistics
+=============
+
+Device Mapper supports the collection of I/O statistics on user-defined
+regions of a DM device.  If no regions are defined, no statistics are
+collected, so there is no performance impact.  Only bio-based DM
+devices are currently supported.
+
+Each user-defined region specifies a starting sector, length and step.
+Individual statistics will be collected for each step-sized area within
+the range specified.
+
+The I/O statistics counters for each step-sized area of a region are
+in the same format as /sys/block/*/stat or /proc/diskstats (see:
+Documentation/iostats.txt).  But two extra counters (12 and 13) are
+provided: total time spent reading and writing in milliseconds.	 All
+these counters may be accessed by sending the @stats_print message to
+the appropriate DM device via dmsetup.
+
+Each region has a corresponding unique identifier, which we call a
+region_id, that is assigned when the region is created.	 The region_id
+must be supplied when querying statistics about the region, deleting the
+region, etc.  Unique region_ids enable multiple userspace programs to
+request and process statistics for the same DM device without stepping
+on each other's data.
+
+The creation of DM statistics will allocate memory via kmalloc or
+fall back to using vmalloc space.  At most, 1/4 of the overall system
+memory may be allocated by DM statistics.  The admin can see how much
+memory is used by reading
+/sys/module/dm_mod/parameters/stats_current_allocated_bytes
+
+Messages
+========
+
+    @stats_create <range> <step> [<program_id> [<aux_data>]]
+
+	Create a new region and return the region_id.
+
+	<range>
+	  "-" - whole device
+	  "<start_sector>+<length>" - a range of <length> 512-byte sectors
+				      starting with <start_sector>.
+
+	<step>
+	  "<area_size>" - the range is subdivided into areas each containing
+			  <area_size> sectors.
+	  "/<number_of_areas>" - the range is subdivided into the specified
+				 number of areas.
+
+	<program_id>
+	  An optional parameter.  A name that uniquely identifies
+	  the userspace owner of the range.  This groups ranges together
+	  so that userspace programs can identify the ranges they
+	  created and ignore those created by others.
+	  The kernel returns this string back in the output of
+	  @stats_list message, but it doesn't use it for anything else.
+
+	<aux_data>
+	  An optional parameter.  A word that provides auxiliary data
+	  that is useful to the client program that created the range.
+	  The kernel returns this string back in the output of
+	  @stats_list message, but it doesn't use this value for anything.
+
+    @stats_delete <region_id>
+
+	Delete the region with the specified id.
+
+	<region_id>
+	  region_id returned from @stats_create
+
+    @stats_clear <region_id>
+
+	Clear all the counters except the in-flight i/o counters.
+
+	<region_id>
+	  region_id returned from @stats_create
+
+    @stats_list [<program_id>]
+
+	List all regions registered with @stats_create.
+
+	<program_id>
+	  An optional parameter.
+	  If this parameter is specified, only matching regions
+	  are returned.
+	  If it is not specified, all regions are returned.
+
+	Output format:
+	  <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+
+    @stats_print <region_id> [<starting_line> <number_of_lines>]
+
+	Print counters for each step-sized area of a region.
+
+	<region_id>
+	  region_id returned from @stats_create
+
+	<starting_line>
+	  The index of the starting line in the output.
+	  If omitted, all lines are returned.
+
+	<number_of_lines>
+	  The number of lines to include in the output.
+	  If omitted, all lines are returned.
+
+	Output format for each step-sized area of a region:
+
+	  <start_sector>+<length> counters
+
+	  The first 11 counters have the same meaning as
+	  /sys/block/*/stat or /proc/diskstats.
+
+	  Please refer to Documentation/iostats.txt for details.
+
+	  1. the number of reads completed
+	  2. the number of reads merged
+	  3. the number of sectors read
+	  4. the number of milliseconds spent reading
+	  5. the number of writes completed
+	  6. the number of writes merged
+	  7. the number of sectors written
+	  8. the number of milliseconds spent writing
+	  9. the number of I/Os currently in progress
+	  10. the number of milliseconds spent doing I/Os
+	  11. the weighted number of milliseconds spent doing I/Os
+
+	  Additional counters:
+	  12. the total time spent reading in milliseconds
+	  13. the total time spent writing in milliseconds
+
+    @stats_print_clear <region_id> [<starting_line> <number_of_lines>]
+
+	Atomically print and then clear all the counters except the
+	in-flight i/o counters.	 Useful when the client consuming the
+	statistics does not want to lose any statistics (those updated
+	between printing and clearing).
+
+	<region_id>
+	  region_id returned from @stats_create
+
+	<starting_line>
+	  The index of the starting line in the output.
+	  If omitted, all lines are printed and then cleared.
+
+	<number_of_lines>
+	  The number of lines to process.
+	  If omitted, all lines are printed and then cleared.
+
+    @stats_set_aux <region_id> <aux_data>
+
+	Store auxiliary data aux_data for the specified region.
+
+	<region_id>
+	  region_id returned from @stats_create
+
+	<aux_data>
+	  The string that identifies data which is useful to the client
+	  program that created the range.  The kernel returns this
+	  string back in the output of @stats_list message, but it
+	  doesn't use this value for anything.
+
+Examples
+========
+
+Subdivide the DM device 'vol' into 100 pieces and start collecting
+statistics on them:
+
+  dmsetup message vol 0 @stats_create - /100
+
+Set the auxiliary data string to "foo bar baz" (the escape for each
+space must also be escaped, otherwise the shell will consume them):
+
+  dmsetup message vol 0 @stats_set_aux 0 foo\\ bar\\ baz
+
+List the statistics:
+
+  dmsetup message vol 0 @stats_list
+
+Print the statistics:
+
+  dmsetup message vol 0 @stats_print 0
+
+Delete the statistics:
+
+  dmsetup message vol 0 @stats_delete 0
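+
+Create a region tagged with a <program_id>, list only regions with that
+tag, and print a limited number of lines (a sketch; "mytool" is a
+placeholder and <region_id> is the id returned by @stats_create):
+
+  dmsetup message vol 0 @stats_create - /100 mytool
+  dmsetup message vol 0 @stats_list mytool
+  dmsetup message vol 0 @stats_print <region_id> 0 10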
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 30b8b83..50c44cf 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -99,13 +99,14 @@
 		 $data_block_size $low_water_mark"
 
 $data_block_size gives the smallest unit of disk space that can be
-allocated at a time expressed in units of 512-byte sectors.  People
-primarily interested in thin provisioning may want to use a value such
-as 1024 (512KB).  People doing lots of snapshotting may want a smaller value
-such as 128 (64KB).  If you are not zeroing newly-allocated data,
-a larger $data_block_size in the region of 256000 (128MB) is suggested.
-$data_block_size must be the same for the lifetime of the
-metadata device.
+allocated at a time expressed in units of 512-byte sectors.
+$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
+multiple of 128 (64KB).  $data_block_size cannot be changed after the
+thin-pool is created.  People primarily interested in thin provisioning
+may want to use a value such as 1024 (512KB).  People doing lots of
+snapshotting may want a smaller value such as 128 (64KB).  If you are
+not zeroing newly-allocated data, a larger $data_block_size in the
+region of 256000 (128MB) is suggested.
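+
+For example, a snapshot-heavy pool using 128-sector (64KB) blocks could
+be created as in the following sketch (20971520 is a placeholder device
+size in sectors; the final 32768 is the low water mark, described below):
+
+  dmsetup create pool \
+	--table "0 20971520 thin-pool $metadata_dev $data_dev 128 32768"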
 
 $low_water_mark is expressed in blocks of size $data_block_size.  If
 free space on the data device drops below this level then a dm event
diff --git a/Documentation/devicetree/bindings/clock/exynos4-clock.txt b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
index 14d5c2a..c6bf8a6 100644
--- a/Documentation/devicetree/bindings/clock/exynos4-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
@@ -236,6 +236,7 @@
   spi0_isp_sclk       380     Exynos4x12
   spi1_isp_sclk       381     Exynos4x12
   uart_isp_sclk       382     Exynos4x12
+  tmu_apbif           383
 
 		[Mux Clocks]
 
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 781a627..24765c1 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -59,6 +59,9 @@
   sclk_spi0		154
   sclk_spi1		155
   sclk_spi2		156
+  div_i2s1		157
+  div_i2s2		158
+  sclk_hdmiphy		159
 
 
    [Peripheral Clock Gates]
@@ -154,7 +157,16 @@
   dsim0			341
   dp			342
   mixer			343
-  hdmi			345
+  hdmi			344
+  g2d			345
+
+
+   [Clock Muxes]
+
+  Clock			ID
+  ----------------------------
+  mout_hdmi		1024
+
 
 Example 1: An example of a clock controller node is listed below.
 
diff --git a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
index 9bcc4b1..32aa34e 100644
--- a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
@@ -59,6 +59,7 @@
   sclk_pwm		155
   sclk_gscl_wa		156
   sclk_gscl_wb		157
+  sclk_hdmiphy		158
 
    [Peripheral Clock Gates]
 
@@ -179,6 +180,17 @@
   fimc_lite3		495
   aclk_g3d		500
   g3d			501
+  smmu_mixer		502
+
+  Mux			ID
+  ----------------------------
+
+  mout_hdmi		640
+
+  Divider		ID
+  ----------------------------
+
+  dout_pixel		768
 
 Example 1: An example of a clock controller node is listed below.
 
diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt
new file mode 100644
index 0000000..fa171dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt
@@ -0,0 +1,77 @@
+* Samsung S3C64xx Clock Controller
+
+The S3C64xx clock controller generates and supplies clock to various controllers
+within the SoC. The clock binding described here is applicable to all SoCs in
+the S3C64xx family.
+
+Required Properties:
+
+- compatible: should be one of the following.
+  - "samsung,s3c6400-clock" - controller compatible with S3C6400 SoC.
+  - "samsung,s3c6410-clock" - controller compatible with S3C6410 SoC.
+
+- reg: physical base address of the controller and length of memory mapped
+  region.
+
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. Some of the clocks are available only
+on a particular S3C64xx SoC and this is specified where applicable.
+
+All available clocks are defined as preprocessor macros in
+dt-bindings/clock/samsung,s3c64xx-clock.h header and can be used in device
+tree sources.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with following
+clock-output-names:
+ - "fin_pll" - PLL input clock (xtal/extclk) - required,
+ - "xusbxti" - USB xtal - required,
+ - "iiscdclk0" - I2S0 codec clock - optional,
+ - "iiscdclk1" - I2S1 codec clock - optional,
+ - "iiscdclk2" - I2S2 codec clock - optional,
+ - "pcmcdclk0" - PCM0 codec clock - optional,
+ - "pcmcdclk1" - PCM1 codec clock - optional, only S3C6410.
+
+Example: Clock controller node:
+
+	clock: clock-controller@7e00f000 {
+		compatible = "samsung,s3c6410-clock";
+		reg = <0x7e00f000 0x1000>;
+		#clock-cells = <1>;
+	};
+
+Example: Required external clocks:
+
+	fin_pll: clock-fin-pll {
+		compatible = "fixed-clock";
+		clock-output-names = "fin_pll";
+		clock-frequency = <12000000>;
+		#clock-cells = <0>;
+	};
+
+	xusbxti: clock-xusbxti {
+		compatible = "fixed-clock";
+		clock-output-names = "xusbxti";
+		clock-frequency = <48000000>;
+		#clock-cells = <0>;
+	};
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller (refer to the standard clock bindings for information about
+  "clocks" and "clock-names" properties):
+
+		uart0: serial@7f005000 {
+			compatible = "samsung,s3c6400-uart";
+			reg = <0x7f005000 0x100>;
+			interrupt-parent = <&vic1>;
+			interrupts = <5>;
+			clock-names = "uart", "clk_uart_baud2",
+					"clk_uart_baud3";
+			clocks = <&clock PCLK_UART0>, <&clock PCLK_UART0>,
+					<&clock SCLK_UART>;
+			status = "disabled";
+		};
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index d495521..00a5c264 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -8,19 +8,31 @@
 - compatible : shall be one of the following:
 	"allwinner,sun4i-osc-clk" - for a gatable oscillator
 	"allwinner,sun4i-pll1-clk" - for the main PLL clock
+	"allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31
 	"allwinner,sun4i-cpu-clk" - for the CPU multiplexer clock
 	"allwinner,sun4i-axi-clk" - for the AXI clock
 	"allwinner,sun4i-axi-gates-clk" - for the AXI gates
 	"allwinner,sun4i-ahb-clk" - for the AHB clock
 	"allwinner,sun4i-ahb-gates-clk" - for the AHB gates on A10
 	"allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13
+	"allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
+	"allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
+	"allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
+	"allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
 	"allwinner,sun4i-apb0-clk" - for the APB0 clock
 	"allwinner,sun4i-apb0-gates-clk" - for the APB0 gates on A10
 	"allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
+	"allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
+	"allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
 	"allwinner,sun4i-apb1-clk" - for the APB1 clock
 	"allwinner,sun4i-apb1-mux-clk" - for the APB1 clock muxing
 	"allwinner,sun4i-apb1-gates-clk" - for the APB1 gates on A10
 	"allwinner,sun5i-a13-apb1-gates-clk" - for the APB1 gates on A13
+	"allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s
+	"allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31
+	"allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20
+	"allwinner,sun6i-a31-apb2-div-clk" - for the APB2 clock divider on A31
+	"allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
 
 Required properties for all clocks:
 - reg : shall be the control register address for the clock.
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt
new file mode 100644
index 0000000..d24279f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt
@@ -0,0 +1,75 @@
+Gate clock outputs
+------------------
+
+  * AXI gates ("allwinner,sun4i-axi-gates-clk")
+
+    DRAM					0
+
+  * AHB gates ("allwinner,sun5i-a10s-ahb-gates-clk")
+
+    USB0					0
+    EHCI0					1
+    OHCI0					2
+
+    SS						5
+    DMA						6
+    BIST					7
+    MMC0					8
+    MMC1					9
+    MMC2					10
+
+    NAND					13
+    SDRAM					14
+
+    EMAC					17
+    TS						18
+
+    SPI0					20
+    SPI1					21
+    SPI2					22
+
+    GPS						26
+
+    HSTIMER					28
+
+    VE						32
+
+    TVE						34
+
+    LCD						36
+
+    CSI						40
+
+    HDMI					43
+    DE_BE					44
+
+    DE_FE					46
+
+    IEP						51
+    MALI400					52
+
+  * APB0 gates ("allwinner,sun5i-a10s-apb0-gates-clk")
+
+    CODEC					0
+
+    IIS						3
+
+    PIO						5
+    IR						6
+
+    KEYPAD					10
+
+  * APB1 gates ("allwinner,sun5i-a10s-apb1-gates-clk")
+
+    I2C0					0
+    I2C1					1
+    I2C2					2
+
+    UART0					16
+    UART1					17
+    UART2					18
+    UART3					19
+
+Notation:
+ [*]:  The datasheet didn't mention these, but they are present in the AW code
+ [**]: The datasheet had these marked as "NC", but they are used in the AW code
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt
new file mode 100644
index 0000000..fe44932
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt
@@ -0,0 +1,83 @@
+Gate clock outputs
+------------------
+
+  * AHB1 gates ("allwinner,sun6i-a31-ahb1-gates-clk")
+
+    MIPI DSI					1
+
+    SS						5
+    DMA						6
+
+    MMC0					8
+    MMC1					9
+    MMC2					10
+    MMC3					11
+
+    NAND1					12
+    NAND0					13
+    SDRAM					14
+
+    GMAC					17
+    TS						18
+    HSTIMER					19
+    SPI0					20
+    SPI1					21
+    SPI2					22
+    SPI3					23
+    USB_OTG					24
+
+    EHCI0					26
+    EHCI1					27
+
+    OHCI0					29
+    OHCI1					30
+    OHCI2					31
+    VE						32
+
+    LCD0					36
+    LCD1					37
+
+    CSI						40
+
+    HDMI					43
+    DE_BE0					44
+    DE_BE1					45
+    DE_FE0					46
+    DE_FE1					47
+
+    MP						50
+
+    GPU						52
+
+    DEU0					55
+    DEU1					56
+    DRC0					57
+    DRC1					58
+
+  * APB1 gates ("allwinner,sun6i-a31-apb1-gates-clk")
+
+    CODEC					0
+
+    DIGITAL MIC					4
+    PIO						5
+
+    DAUDIO0					12
+    DAUDIO1					13
+
+  * APB2 gates ("allwinner,sun6i-a31-apb2-gates-clk")
+
+    I2C0					0
+    I2C1					1
+    I2C2					2
+    I2C3					3
+
+    UART0					16
+    UART1					17
+    UART2					18
+    UART3					19
+    UART4					20
+    UART5					21
+
+Notation:
+ [*]:  The datasheet didn't mention these, but they are present in the AW code
+ [**]: The datasheet had these marked as "NC", but they are used in the AW code
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt
new file mode 100644
index 0000000..357f4fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt
@@ -0,0 +1,98 @@
+Gate clock outputs
+------------------
+
+  * AXI gates ("allwinner,sun4i-axi-gates-clk")
+
+    DRAM					0
+
+  * AHB gates ("allwinner,sun7i-a20-ahb-gates-clk")
+
+    USB0					0
+    EHCI0					1
+    OHCI0					2
+    EHCI1					3
+    OHCI1					4
+    SS						5
+    DMA						6
+    BIST					7
+    MMC0					8
+    MMC1					9
+    MMC2					10
+    MMC3					11
+    MS						12
+    NAND					13
+    SDRAM					14
+
+    ACE						16
+    EMAC					17
+    TS						18
+
+    SPI0					20
+    SPI1					21
+    SPI2					22
+    SPI3					23
+
+    SATA					25
+
+    HSTIMER					28
+
+    VE						32
+    TVD						33
+    TVE0					34
+    TVE1					35
+    LCD0					36
+    LCD1					37
+
+    CSI0					40
+    CSI1					41
+
+    HDMI1					42
+    HDMI0					43
+    DE_BE0					44
+    DE_BE1					45
+    DE_FE0					46
+    DE_FE1					47
+
+    GMAC					49
+    MP						50
+
+    MALI400					52
+
+  * APB0 gates ("allwinner,sun7i-a20-apb0-gates-clk")
+
+    CODEC					0
+    SPDIF					1
+    AC97					2
+    IIS0					3
+    IIS1					4
+    PIO						5
+    IR0						6
+    IR1						7
+    IIS2					8
+
+    KEYPAD					10
+
+  * APB1 gates ("allwinner,sun7i-a20-apb1-gates-clk")
+
+    I2C0					0
+    I2C1					1
+    I2C2					2
+    I2C3					3
+    CAN						4
+    SCR						5
+    PS20					6
+    PS21					7
+
+    I2C4					15
+    UART0					16
+    UART1					17
+    UART2					18
+    UART3					19
+    UART4					20
+    UART5					21
+    UART6					22
+    UART7					23
+
+Notation:
+ [*]:  The datasheet didn't mention these, but they are present in the AW code
+ [**]: The datasheet had these marked as "NC", but they are used in the AW code
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 68cee4f5..4fa814d 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -1,7 +1,12 @@
 * Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
 
 Required properties:
-- compatible : Should be "fsl,<chip>-sdma"
+- compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma",
+  "fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma",
+  "fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or
+  "fsl,imx6q-sdma". The -to variants should be preferred since they
+  allow determining the correct ROM script addresses needed for
+  the driver to work without additional firmware.
 - reg : Should contain SDMA registers location and length
 - interrupts : Should contain SDMA interrupt
 - #dma-cells : Must be <3>.
diff --git a/Documentation/devicetree/bindings/dma/k3dma.txt b/Documentation/devicetree/bindings/dma/k3dma.txt
new file mode 100644
index 0000000..23f8d71
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/k3dma.txt
@@ -0,0 +1,46 @@
+* Hisilicon K3 DMA controller
+
+See dma.txt first
+
+Required properties:
+- compatible: Should be "hisilicon,k3-dma-1.0"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: see dma.txt; should be 1 (the cell is the request line number)
+- dma-channels: physical channels supported
+- dma-requests: virtual channels supported; each virtual channel
+		has a specific request line
+- clocks: clock required
+
+Example:
+
+Controller:
+		dma0: dma@fcd02000 {
+			compatible = "hisilicon,k3-dma-1.0";
+			reg = <0xfcd02000 0x1000>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			dma-requests = <27>;
+			interrupts = <0 12 4>;
+			clocks = <&pclk>;
+			status = "disabled";
+		};
+
+Client:
+Use the specific request line passed from dmax.
+For example, the i2c0 read channel request line is 18, while the write
+channel uses 19:
+
+		i2c0: i2c@fcb08000 {
+			compatible = "snps,designware-i2c";
+			dmas =	<&dma0 18          /* read channel */
+				 &dma0 19>;        /* write channel */
+			dma-names = "rx", "tx";
+		};
+
+		i2c1: i2c@fcb09000 {
+			compatible = "snps,designware-i2c";
+			dmas =	<&dma0 20          /* read channel */
+				 &dma0 21>;        /* write channel */
+			dma-names = "rx", "tx";
+		};
+
diff --git a/Documentation/devicetree/bindings/dma/shdma.txt b/Documentation/devicetree/bindings/dma/shdma.txt
index c15994a..2a3f3b8 100644
--- a/Documentation/devicetree/bindings/dma/shdma.txt
+++ b/Documentation/devicetree/bindings/dma/shdma.txt
@@ -22,42 +22,51 @@
 * DMA controller
 
 Required properties:
-- compatible:	should be "renesas,shdma"
+- compatible:	should be of the form "renesas,shdma-<soc>", where <soc> should
+		be replaced with the desired SoC model, e.g.
+		"renesas,shdma-r8a73a4" for the system DMAC on r8a73a4 SoC
 
 Example:
-	dmac: dma-mux0 {
+	dmac: dma-multiplexer@0 {
 		compatible = "renesas,shdma-mux";
 		#dma-cells = <1>;
-		dma-channels = <6>;
+		dma-channels = <20>;
 		dma-requests = <256>;
-		reg = <0 0>;	/* Needed for AUXDATA */
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
 		ranges;
 
-		dma0: shdma@fe008020 {
-			compatible = "renesas,shdma";
-			reg = <0xfe008020 0x270>,
-				<0xfe009000 0xc>;
+		dma0: dma-controller@e6700020 {
+			compatible = "renesas,shdma-r8a73a4";
+			reg = <0 0xe6700020 0 0x89e0>;
 			interrupt-parent = <&gic>;
-			interrupts = <0 34 4
-					0 28 4
-					0 29 4
-					0 30 4
-					0 31 4
-					0 32 4
-					0 33 4>;
+			interrupts = <0 220 4
+					0 200 4
+					0 201 4
+					0 202 4
+					0 203 4
+					0 204 4
+					0 205 4
+					0 206 4
+					0 207 4
+					0 208 4
+					0 209 4
+					0 210 4
+					0 211 4
+					0 212 4
+					0 213 4
+					0 214 4
+					0 215 4
+					0 216 4
+					0 217 4
+					0 218 4
+					0 219 4>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5";
-		};
-
-		dma1: shdma@fe018020 {
-			...
-		};
-
-		dma2: shdma@fe028020 {
-			...
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15",
+					"ch16", "ch17", "ch18", "ch19";
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/gpu/samsung-g2d.txt b/Documentation/devicetree/bindings/gpu/samsung-g2d.txt
index 3f454ff..c4f358d 100644
--- a/Documentation/devicetree/bindings/gpu/samsung-g2d.txt
+++ b/Documentation/devicetree/bindings/gpu/samsung-g2d.txt
@@ -11,8 +11,11 @@
 
   - interrupts : G2D interrupt number to the CPU.
   - clocks : from common clock binding: handle to G2D clocks.
-  - clock-names : from common clock binding: must contain "sclk_fimg2d" and
-		  "fimg2d", corresponding to entries in the clocks property.
+  - clock-names : names of clocks listed in clocks property, in the same
+		  order, depending on SoC type:
+		  - for S5PV210 and Exynos4 based SoCs: "fimg2d" and
+		    "sclk_fimg2d"
+		  - for Exynos5250 SoC: "fimg2d".
 
 Example:
 	g2d@12800000 {
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index bd9be0b..b7943f3 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -19,6 +19,9 @@
     "bus-width = <1>" property.
   - sdhci,auto-cmd12: specifies that a controller can only handle auto
     CMD12.
+  - voltage-ranges : two cells are required, first cell specifies minimum
+    slot voltage (mV), second cell specifies maximum slot voltage (mV).
+    Several ranges may be specified.
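+    For example, a slot usable at both 1.8V and 3.3V could specify two
+    ranges (a sketch): voltage-ranges = <1800 1800 3300 3300>;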
 
 Example:
 
@@ -29,4 +32,5 @@
 	interrupt-parent = <&ipic>;
 	/* Filled in by U-Boot */
 	clock-frequency = <0>;
+	voltage-ranges = <3300 3300>;
 };
diff --git a/Documentation/devicetree/bindings/net/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt
index c2dbcec..f2105a4 100644
--- a/Documentation/devicetree/bindings/net/can/sja1000.txt
+++ b/Documentation/devicetree/bindings/net/can/sja1000.txt
@@ -37,7 +37,7 @@
 	If not specified or if the specified value is 0, the CLKOUT pin
 	will be disabled.
 
-- nxp,no-comparator-bypass : Allows to disable the CAN input comperator.
+- nxp,no-comparator-bypass : Allows disabling the CAN input comparator.
 
 For further information, please have a look to the SJA1000 data sheet.
 
diff --git a/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt b/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt
new file mode 100644
index 0000000..ce44ad3
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt
@@ -0,0 +1,17 @@
+MSM Restart Driver
+
+A power supply hold (ps-hold) bit is set to power the MSM chipsets.
+Clearing that bit allows us to restart/poweroff. The difference
+between poweroff and restart is determined by unique power manager IC
+settings.
+
+Required Properties:
+-compatible: "qcom,pshold"
+-reg: Specifies the physical address of the ps-hold register
+
+Example:
+
+	restart@fc4ab000 {
+		compatible = "qcom,pshold";
+		reg = <0xfc4ab000 0x4>;
+	};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
index 4caa1a7..d61fccd 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
@@ -19,6 +19,16 @@
 - reg: base address and size of register area
 - interrupts: list of timer interrupts (one interrupt per timer, starting at
   timer 0)
+- clock-names: should contain all of the following required clock names:
+    - "timers" - PWM base clock used to generate PWM signals,
+  and any subset of following optional clock names:
+    - "pwm-tclk0" - first external PWM clock source,
+    - "pwm-tclk1" - second external PWM clock source.
+  Note that not all IP variants allow using all external clock sources.
+  Refer to SoC documentation to learn which clock source configurations
+  are available.
+- clocks: should contain clock specifiers for all clocks whose input names
+  have been specified in the clock-names property, in the same order.
 - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
   the cells format. The only third cell flag supported by this binding is
   PWM_POLARITY_INVERTED.
@@ -34,6 +44,8 @@
 		reg = <0x7f006000 0x1000>;
 		interrupt-parent = <&vic0>;
 		interrupts = <23>, <24>, <25>, <27>, <28>;
+		clocks = <&clock 67>;
+		clock-names = "timers";
 		samsung,pwm-outputs = <0>, <1>;
 		#pwm-cells = <3>;
 	}
diff --git a/Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt b/Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt
new file mode 100644
index 0000000..c9d3ac1
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt
@@ -0,0 +1,17 @@
+MOXA ART real-time clock
+
+Required properties:
+
+- compatible : Should be "moxa,moxart-rtc"
+- gpio-rtc-sclk : RTC sclk gpio, with zero flags
+- gpio-rtc-data : RTC data gpio, with zero flags
+- gpio-rtc-reset : RTC reset gpio, with zero flags
+
+Example:
+
+	rtc: rtc {
+		compatible = "moxa,moxart-rtc";
+		gpio-rtc-sclk = <&gpio 5 0>;
+		gpio-rtc-data = <&gpio 6 0>;
+		gpio-rtc-reset = <&gpio 7 0>;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index b47aa41..5a0f02d 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -1,7 +1,11 @@
 TI Real Time Clock
 
 Required properties:
-- compatible: "ti,da830-rtc"
+- compatible:
+	- "ti,da830-rtc"  - for the RTC IP similar to that on the DA8xx SoC family.
+	- "ti,am3352-rtc" - for the RTC IP similar to that on the AM335x SoC family.
+			    This RTC IP has a special WAKE-EN register to enable
+			    wakeup generation for the alarm event.
 - reg: Address range of rtc register set
 - interrupts: rtc timer, alarm interrupts in order
 - interrupt-parent: phandle for the interrupt controller
diff --git a/Documentation/devicetree/bindings/rtc/rtc-palmas.txt b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
new file mode 100644
index 0000000..adbccc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
@@ -0,0 +1,33 @@
+Palmas RTC controller bindings
+
+Required properties:
+- compatible:
+  - "ti,palmas-rtc" for the RTC controller on the Palmas series
+- interrupt-parent: Parent interrupt device, must be handle of palmas node.
+- interrupts: Interrupt number of RTC submodule on device.
+
+Optional properties:
+
+- ti,backup-battery-chargeable: Palmas series devices like the TPS65913 or
+	TPS80036 support a backup battery for powering the RTC when the main
+	battery is removed or in a very low power state. The backup battery
+	can be chargeable or non-chargeable. This flag tells whether the
+	battery is chargeable or not. If it is, the driver can enable
+	charging.
+- ti,backup-battery-charge-high-current: Enable high-current charging of
+	the backup battery. The device supports < 100mA and > 100mA charging;
+	high current means > 100mA. If this property is absent, the battery
+	is charged at the lower current, i.e. < 100mA.
+
+Example:
+	palmas: tps65913@58 {
+		...
+		palmas_rtc: rtc {
+			compatible = "ti,palmas-rtc";
+			interrupt-parent = <&palmas>;
+			interrupts = <8 0>;
+			ti,backup-battery-chargeable;
+			ti,backup-battery-charge-high-current;
+		};
+		...
+	};
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index e31a2a9..505e711 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -407,6 +407,18 @@
    interesting ways depending upong the exporter (if userspace starts depending
    upon this implicit synchronization).
 
+Other Interfaces Exposed to Userspace on the dma-buf FD
+-------------------------------------------------------
+
+- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only
+  with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow
+  the usual size discovery pattern size = SEEK_END(0); SEEK_SET(0). Every
+  other llseek operation will report -EINVAL.
+
+  If llseek on dma-buf FDs isn't supported the kernel will report -ESPIPE for
+  all cases. Userspace can use this to detect support for discovering the
+  dma-buf size using llseek.
+
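+  A minimal C sketch of this pattern (assuming dmabuf_fd is a valid
+  dma-buf file descriptor; needs <unistd.h> and <errno.h>):
+
+	off_t size = lseek(dmabuf_fd, 0, SEEK_END);
+	if (size < 0 && errno == ESPIPE)
+		; /* kernel without dma-buf llseek support */
+	lseek(dmabuf_fd, 0, SEEK_SET);	/* reset offset for the next user */
+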
 Miscellaneous notes
 -------------------
 
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index 132a094..a2b5663 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -16,15 +16,16 @@
 	Part 2 - When dmatest is built as a module...
 
 After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest
-folder with nodes will be created. They are the same as module parameters with
-addition of the 'run' node that controls run and stop phases of the test.
+folder with nodes will be created. Two of these files are important: the
+'run' node, which controls the run and stop phases of the test, and the
+'results' node, which is used to get the test case results.
 
 Note that in this case test will not run on load automatically.
 
 Example of usage:
-	% echo dma0chan0 > /sys/kernel/debug/dmatest/channel
-	% echo 2000 > /sys/kernel/debug/dmatest/timeout
-	% echo 1 > /sys/kernel/debug/dmatest/iterations
+	% echo dma0chan0 > /sys/module/dmatest/parameters/channel
+	% echo 2000 > /sys/module/dmatest/parameters/timeout
+	% echo 1 > /sys/module/dmatest/parameters/iterations
 	% echo 1 > /sys/kernel/debug/dmatest/run
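+
+Once the test has stopped, the results can be read back from the
+'results' node mentioned above:
+	% cat /sys/kernel/debug/dmatest/results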
 
 Hint: available channel list could be extracted by running the following
@@ -55,8 +56,8 @@
 re-run with the same or different parameters. For the details see the above
 section "Part 2 - When dmatest is built as a module..."
 
-In both cases the module parameters are used as initial values for the test case.
-You always could check them at run-time by running
+In both cases the module parameters are used as the actual values for the test
+case. You can always check them at run-time by running
 	% grep -H . /sys/module/dmatest/parameters/*
 
 	Part 4 - Gathering the test results
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index fb57d85..fcb34a5 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -299,3 +299,6 @@
 PHY
   devm_usb_get_phy()
   devm_usb_put_phy()
+
+SLAVE DMA ENGINE
+  devm_acpi_dma_controller_register()
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index fcc22c9..823c95f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -854,16 +854,15 @@
               The committed memory is a sum of all of the memory which
               has been allocated by processes, even if it has not been
               "used" by them as of yet. A process which malloc()'s 1G
-              of memory, but only touches 300M of it will only show up
-              as using 300M of memory even if it has the address space
-              allocated for the entire 1G. This 1G is memory which has
-              been "committed" to by the VM and can be used at any time
-              by the allocating application. With strict overcommit
-              enabled on the system (mode 2 in 'vm.overcommit_memory'),
-              allocations which would exceed the CommitLimit (detailed
-              above) will not be permitted. This is useful if one needs
-              to guarantee that processes will not fail due to lack of
-              memory once that memory has been successfully allocated.
+              of memory, but only touches 300M of it will show up as
+              using 1G. This 1G is memory which has been "committed" to
+              by the VM and can be used at any time by the allocating
+              application. With strict overcommit enabled on the system
+              (mode 2 in 'vm.overcommit_memory'), allocations which would
+              exceed the CommitLimit (detailed above) will not be permitted.
+              This is useful if one needs to guarantee that processes will
+              not fail due to lack of memory once that memory has been
+              successfully allocated.
 VmallocTotal: total size of vmalloc memory area
  VmallocUsed: amount of vmalloc area which is used
 VmallocChunk: largest contiguous block of vmalloc area which is free
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.txt b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
index 59b4a09..b176928 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.txt
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
@@ -79,6 +79,10 @@
 Most systems just mount another filesystem over rootfs and ignore it.  The
 amount of space an empty instance of ramfs takes up is tiny.
 
+If CONFIG_TMPFS is enabled, rootfs will use tmpfs instead of ramfs by
+default.  To force ramfs, add "rootfstype=ramfs" to the kernel command
+line.
+
 What is initramfs?
 ------------------
 
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index c858f84..c420676 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -147,6 +147,7 @@
   - "modules"
     This declares the symbol to be used as the MODULES symbol, which
     enables the third modular state for all config symbols.
+    At most one symbol may have the "modules" option set.
 
   - "env"=<value>
     This imports the environment variable into Kconfig. It behaves like
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index e349f29..8ef6dbb 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -175,11 +175,9 @@
 		/^hotplug
 
 	When searching, symbols are sorted thus:
-	  - exact match first: an exact match is when the search matches
-	    the complete symbol name;
-	  - alphabetical order: when two symbols do not match exactly,
-	    they are sorted in alphabetical order (in the user's current
-	    locale).
+	  - first, exact matches, sorted alphabetically (an exact match
+	    is when the search matches the complete symbol name);
+	  - then, other matches, sorted alphabetically.
 	For example: ^ATH.K matches:
 	    ATH5K ATH9K ATH5K_AHB ATH5K_DEBUG [...] ATH6KL ATH6KL_DEBUG
 	    [...] ATH9K_AHB ATH9K_BTCOEX_SUPPORT ATH9K_COMMON [...]
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 18b64b2..f11580f 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -86,6 +86,8 @@
 	- info on Generic Netlink
 gianfar.txt
 	- Gianfar Ethernet Driver.
+i40e.txt
+	- README for the Intel Ethernet Controller XL710 Driver (i40e).
 ieee802154.txt
 	- Linux IEEE 802.15.4 implementation, API and drivers
 igb.txt
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 87bbcfe..9b28e71 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1362,6 +1362,12 @@
 To remove an ARP target:
 # echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
 
+To configure the interval between learning packet transmits:
+# echo 12 > /sys/class/net/bond0/bonding/lp_interval
+	NOTE: the lp_interval is the number of seconds between instances where
+the bonding driver sends learning packets to each slave's peer switch.  The
+default interval is 1 second.
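+
+The current value can be read back the same way:
+# cat /sys/class/net/bond0/bonding/lp_interval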
+
 Example Configuration
 ---------------------
 	We begin with the same example that is shown in section 3.3,
diff --git a/Documentation/networking/i40e.txt b/Documentation/networking/i40e.txt
new file mode 100644
index 0000000..f737273
--- /dev/null
+++ b/Documentation/networking/i40e.txt
@@ -0,0 +1,115 @@
+Linux Base Driver for the Intel(R) Ethernet Controller XL710 Family
+===================================================================
+
+Intel i40e Linux driver.
+Copyright(c) 2013 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Additional Configurations
+- Performance Tuning
+- Known Issues
+- Support
+
+
+Identifying Your Adapter
+========================
+
+The driver in this release is compatible with the Intel Ethernet
+Controller XL710 Family.
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+    http://support.intel.com/support/network/sb/CS-012904.htm
+
+
+Enabling the driver
+===================
+
+The driver is enabled via the standard kernel configuration system,
+using the make command:
+
+     make oldconfig/silentoldconfig/menuconfig/etc.
+
+The driver is located in the menu structure at:
+
+	-> Device Drivers
+	  -> Network device support (NETDEVICES [=y])
+	    -> Ethernet driver support
+	      -> Intel devices
+	        -> Intel(R) Ethernet Controller XL710 Family
+
+Additional Configurations
+=========================
+
+  Generic Receive Offload (GRO)
+  -----------------------------
+  The driver supports the in-kernel software implementation of GRO.  GRO has
+  shown that by coalescing Rx traffic into larger chunks of data, CPU
+  utilization can be significantly reduced when under large Rx load.  GRO is
+  an evolution of the previously-used LRO interface.  GRO is able to coalesce
+  other protocols besides TCP.  It's also safe to use with configurations that
+  are problematic for LRO, namely bridging and iSCSI.
+
+  Ethtool
+  -------
+  The driver utilizes the ethtool interface for driver configuration and
+  diagnostics, as well as displaying statistical information. The latest
+  ethtool version is required for this functionality.
+
+  The latest release of ethtool can be found from
+  https://www.kernel.org/pub/software/network/ethtool
+
+  Data Center Bridging (DCB)
+  --------------------------
+  DCB configuration is not currently supported.
+
+  FCoE
+  ----
+  Fibre Channel over Ethernet (FCoE) hardware offload is not currently
+  supported.
+
+  MAC and VLAN anti-spoofing feature
+  ----------------------------------
+  When a malicious driver attempts to send a spoofed packet, it is dropped by
+  the hardware and not transmitted.  An interrupt is sent to the PF driver
+  notifying it of the spoof attempt.
+
+  When a spoofed packet is detected the PF driver will send the following
+  message to the system log (displayed by the "dmesg" command):
+
+  Spoof event(s) detected on VF (n)
+
+  Where n=the VF that attempted to do the spoofing.
+
+
+Performance Tuning
+==================
+
+An excellent article on performance tuning can be found at:
+
+http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
+
+
+Known Issues
+============
+
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+    http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+    http://e1000.sourceforge.net
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sourceforge.net and copy
+netdev@vger.kernel.org.
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index ab7d16e..9d4c1d1 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -182,6 +182,7 @@
 	%<NUL>	'%' is dropped
 	%%	output one '%'
 	%p	pid
+	%P	global pid (init PID namespace)
 	%u	uid
 	%g	gid
 	%d	dump mode, matches PR_SET_DUMPABLE and
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 36ecc26..79a797e 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -200,17 +200,25 @@
 
 hugepages_treat_as_movable
 
-This parameter is only useful when kernelcore= is specified at boot time to
-create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
-are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
-value written to hugepages_treat_as_movable allows huge pages to be allocated
-from ZONE_MOVABLE.
+This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
+or not. If set to non-zero, hugepages can be allocated from ZONE_MOVABLE.
+ZONE_MOVABLE is created when kernel boot parameter kernelcore= is specified,
+so this parameter has no effect if used without kernelcore=.
 
-Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
-pages pool can easily grow or shrink within. Assuming that applications are
-not running that mlock() a lot of memory, it is likely the huge pages pool
-can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
-into nr_hugepages and triggering page reclaim.
+Hugepage migration is now available in some situations which depend on the
+architecture and/or the hugepage size. If a hugepage supports migration,
+allocation from ZONE_MOVABLE is always enabled for the hugepage regardless
+of the value of this parameter.
+IOW, this parameter affects only non-migratable hugepages.
+
+Assuming that hugepages are not migratable on your system, one use case for
+this parameter is making the hugepage pool more extensible by enabling
+allocation from ZONE_MOVABLE, because page reclaim/migration/compaction
+work harder on ZONE_MOVABLE, so contiguous memory is more likely to be
+obtained there. Note that using ZONE_MOVABLE for non-migratable hugepages
+can harm other features like memory hot-remove (which expects that memory
+blocks on ZONE_MOVABLE are always removable), so it is a trade-off for
+which the user is responsible.
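+
+For example, to enable the behaviour (a sketch):
+
+  # echo 1 > /proc/sys/vm/hugepages_treat_as_movable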
 
 ==============================================================
 
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 4ac359b..bdd4bb9 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -165,6 +165,7 @@
 
 
 Interaction of Task Memory Policy with Huge Page Allocation/Freeing
+===================================================================
 
 Whether huge pages are allocated and freed via the /proc interface or
 the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA
@@ -229,6 +230,7 @@
    of huge pages over all on-lines nodes with memory.
 
 Per Node Hugepages Attributes
+=============================
 
 A subset of the contents of the root huge page control directory in sysfs,
 described above, will be replicated under each the system device of each
@@ -258,6 +260,7 @@
 
 
 Using Huge Pages
+================
 
 If the user applications are going to request huge pages using mmap system
 call, then it is required that system administrator mount a file system of
@@ -296,20 +299,16 @@
 without MAP_HUGETLB.  For an example of how to use mmap with MAP_HUGETLB see
 map_hugetlb.c.
 
-*******************************************************************
+Examples
+========
 
-/*
- * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
- */
+1) map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
 
-*******************************************************************
+2) hugepage-shm:  see tools/testing/selftests/vm/hugepage-shm.c
 
-/*
- * hugepage-shm:  see tools/testing/selftests/vm/hugepage-shm.c
- */
+3) hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c
 
-*******************************************************************
-
-/*
- * hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c
- */
+4) The libhugetlbfs (http://libhugetlbfs.sourceforge.net) library provides a
+   wide range of userspace tools to help with huge page usability, environment
+   setup, and control. Furthermore it provides useful test cases that should be
+   used when modifying code to ensure no regressions are introduced.
diff --git a/Documentation/vm/soft-dirty.txt b/Documentation/vm/soft-dirty.txt
index 9a12a59..55684d1 100644
--- a/Documentation/vm/soft-dirty.txt
+++ b/Documentation/vm/soft-dirty.txt
@@ -28,6 +28,13 @@
 the kernel does is finds this fact out and puts both writable and soft-dirty
 bits on the PTE.
 
+  While in most cases tracking memory changes by #PF-s is more than enough,
+there is still a scenario where we can lose soft-dirty bits -- a task
+unmaps a previously mapped memory region and then maps a new one at exactly
+the same place. When unmap is called, the kernel internally clears PTE values
+including soft-dirty bits. To notify the user space application about such
+memory region renewal, the kernel always marks new memory regions (and
+expanded regions) as soft-dirty.
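+
+  A sketch of the scenario in C (the re-created mapping is reported
+soft-dirty even though it has not been written to yet):
+
+	munmap(addr, len);
+	mmap(addr, len, PROT_READ | PROT_WRITE,
+	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);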
 
   This feature is actively used by the checkpoint-restore project. You
 can find more details about it on http://criu.org
diff --git a/MAINTAINERS b/MAINTAINERS
index 233ddce..e61c2e8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -933,24 +933,24 @@
 
 ARM/INTEL IOP32X ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP33X ARM ARCHITECTURE
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP13XX ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IQ81342EX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -975,7 +975,7 @@
 
 ARM/INTEL XSC3 (MANZANO) ARM CORE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -1028,7 +1028,7 @@
 ARM/MICREL KS8695 ARCHITECTURE
 M:	Greg Ungerer <gerg@uclinux.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-F:	arch/arm/mach-ks8695
+F:	arch/arm/mach-ks8695/
 S:	Odd Fixes
 
 ARM/MIOA701 MACHINE SUPPORT
@@ -1048,7 +1048,6 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-nomadik/
-F:	arch/arm/plat-nomadik/
 F:	drivers/i2c/busses/i2c-nomadik.c
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 
@@ -1070,7 +1069,7 @@
 F:	drivers/tty/serial/msm_serial.h
 F:	drivers/tty/serial/msm_serial.c
 F:	drivers/*/pm8???-*
-F:	drivers/ssbi/
+F:	drivers/mfd/ssbi/
 F:	include/linux/mfd/pm8xxx/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
 S:	Maintained
@@ -1156,7 +1155,6 @@
 W:	http://www.fluff.org/ben/linux/
 S:	Maintained
 F:	arch/arm/plat-samsung/
-F:	arch/arm/plat-s3c24xx/
 F:	arch/arm/mach-s3c24*/
 F:	arch/arm/mach-s3c64xx/
 F:	drivers/*/*s3c2410*
@@ -1179,8 +1177,6 @@
 S:	Maintained
 F:	arch/arm/mach-s5pv210/mach-aquila.c
 F:	arch/arm/mach-s5pv210/mach-goni.c
-F:	arch/arm/mach-exynos/mach-universal_c210.c
-F:	arch/arm/mach-exynos/mach-nuri.c
 
 ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
@@ -1325,7 +1321,7 @@
 F:	drivers/pwm/pwm-vt8500.c
 F:	drivers/rtc/rtc-vt8500.c
 F:	drivers/tty/serial/vt8500_serial.c
-F:	drivers/usb/host/ehci-vt8500.c
+F:	drivers/usb/host/ehci-platform.c
 F:	drivers/usb/host/uhci-platform.c
 F:	drivers/video/vt8500lcdfb.*
 F:	drivers/video/wm8505fb*
@@ -1386,7 +1382,7 @@
 F:	drivers/platform/x86/eeepc*.c
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
 S:	Maintained
 F:	Documentation/crypto/async-tx-api.txt
@@ -1815,6 +1811,17 @@
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
+BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+M:	Christian Daudt <csd@broadcom.com>
+T:	git git://git.github.com/broadcom/bcm11351
+S:	Maintained
+F:	arch/arm/mach-bcm/
+F:	arch/arm/boot/dts/bcm113*
+F:	arch/arm/boot/dts/bcm281*
+F:	arch/arm/configs/bcm_defconfig
+F:	drivers/mmc/host/sdhci_bcm_kona.c
+F:	drivers/clocksource/bcm_kona_timer.c
+
 BROADCOM BCM2835 ARM ARCHICTURE
 M:	Stephen Warren <swarren@wwwdotorg.org>
 L:	linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2035,10 +2042,10 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 S:	Supported
 F:	Documentation/filesystems/ceph.txt
-F:	fs/ceph
-F:	net/ceph
-F:	include/linux/ceph
-F:	include/linux/crush
+F:	fs/ceph/
+F:	net/ceph/
+F:	include/linux/ceph/
+F:	include/linux/crush/
 
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 L:	linux-usb@vger.kernel.org
@@ -2307,6 +2314,15 @@
 F:	drivers/cpufreq/arm_big_little.c
 F:	drivers/cpufreq/arm_big_little_dt.c
 
+CPUIDLE DRIVER - ARM BIG LITTLE
+M:      Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:      Daniel Lezcano <daniel.lezcano@linaro.org>
+L:      linux-pm@vger.kernel.org
+L:      linux-arm-kernel@lists.infradead.org
+T:      git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+S:      Maintained
+F:      drivers/cpuidle/cpuidle-big_little.c
+
 CPUIDLE DRIVERS
 M:	Rafael J. Wysocki <rjw@sisk.pl>
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
@@ -2326,7 +2342,7 @@
 M:	Dominik Brodowski <linux@dominikbrodowski.net>
 M:	Thomas Renninger <trenn@suse.de>
 S:	Maintained
-F:	tools/power/cpupower
+F:	tools/power/cpupower/
 
 CPUSETS
 M:	Li Zefan <lizefan@huawei.com>
@@ -2682,7 +2698,7 @@
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
 F:	include/linux/dma*
@@ -2764,7 +2780,7 @@
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~danvet/drm-intel
 S:	Supported
-F:	drivers/gpu/drm/i915
+F:	drivers/gpu/drm/i915/
 F:	include/drm/i915*
 F:	include/uapi/drm/i915*
 
@@ -2776,7 +2792,7 @@
 L:	dri-devel@lists.freedesktop.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
 S:	Supported
-F:	drivers/gpu/drm/exynos
+F:	drivers/gpu/drm/exynos/
 F:	include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
@@ -3029,7 +3045,7 @@
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
-F:	drivers/edac/ghes-edac.c
+F:	drivers/edac/ghes_edac.c
 
 EDAC-I82443BXGX
 M:	Tim Small <tim@buttersideup.com>
@@ -3635,8 +3651,8 @@
 L:	linux-arch@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git
 S:	Maintained
-F:	include/asm-generic
-F:	include/uapi/asm-generic
+F:	include/asm-generic/
+F:	include/uapi/asm-generic/
 
 GENERIC UIO DRIVER FOR PCI DEVICES
 M:	"Michael S. Tsirkin" <mst@redhat.com>
@@ -3678,7 +3694,8 @@
 M:	Dmitry Kozlov <xeb@mail.ru>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	net/ipv4/gre.c
+F:	net/ipv4/gre_demux.c
+F:	net/ipv4/gre_offload.c
 F:	include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
@@ -3756,7 +3773,7 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/usb/hdpvr
+F:	drivers/media/usb/hdpvr/
 
 HWPOISON MEMORY FAILURE HANDLING
 M:	Andi Kleen <andi@firstfloor.org>
@@ -4314,7 +4331,7 @@
 F:	arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Maintained
 F:	drivers/dma/ioat*
 
@@ -4327,7 +4344,7 @@
 F:	include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Odd fixes
 F:	drivers/dma/iop-adma.c
 
@@ -4346,7 +4363,7 @@
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 M:	Bruce Allan <bruce.w.allan@intel.com>
@@ -4371,6 +4388,7 @@
 F:	Documentation/networking/ixgb.txt
 F:	Documentation/networking/ixgbe.txt
 F:	Documentation/networking/ixgbevf.txt
+F:	Documentation/networking/i40e.txt
 F:	drivers/net/ethernet/intel/
 
 INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
@@ -4564,7 +4582,7 @@
 W:	http://www.openfabrics.org
 W:	www.open-iscsi.org
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-F:	drivers/infiniband/ulp/iser
+F:	drivers/infiniband/ulp/iser/
 
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
@@ -4618,7 +4636,7 @@
 Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 T:	git git://linuxtv.org/anttip/media_tree.git
 S:	Maintained
-F:	drivers/media/tuners/it913x*
+F:	drivers/media/tuners/tuner_it913x*
 
 IVTV VIDEO4LINUX DRIVER
 M:	Andy Walls <awalls@md.metrocast.net>
@@ -5954,15 +5972,12 @@
 F:	arch/arm/*omap*/*pm*
 F:	drivers/cpufreq/omap-cpufreq.c
 
-OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
+OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:	Rajendra Nayak <rnayak@ti.com>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
-F:	arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
-F:	arch/arm/mach-omap2/powerdomain44xx.c
-F:	arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
-F:	arch/arm/mach-omap2/clockdomain44xx.c
+F:	arch/arm/mach-omap2/prm*
 
 OMAP AUDIO SUPPORT
 M:	Peter Ujfalusi <peter.ujfalusi@ti.com>
@@ -6128,7 +6143,7 @@
 L:	linux@lists.openrisc.net (moderated for non-subscribers)
 S:	Maintained
 T:	git git://openrisc.net/~jonas/linux
-F:	arch/openrisc
+F:	arch/openrisc/
 
 OPENVSWITCH
 M:	Jesse Gross <jesse@nicira.com>
@@ -6419,7 +6434,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://github.com/jamieiles/linux-2.6-ji.git
 S:	Supported
-F:	arch/arm/mach-picoxcell
+F:	arch/arm/mach-picoxcell/
 F:	drivers/*/picoxcell*
 F:	drivers/*/*/picoxcell*
 
@@ -6692,7 +6707,7 @@
 F:	drivers/usb/gadget/pxa2*
 F:	include/sound/pxa2xx-lib.h
 F:	sound/arm/pxa*
-F:	sound/soc/pxa
+F:	sound/soc/pxa/
 
 MMP SUPPORT
 M:	Eric Miao <eric.y.miao@gmail.com>
@@ -7145,7 +7160,7 @@
 M:	Sangbeom Kim <sbkim73@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
-F:	sound/soc/samsung
+F:	sound/soc/samsung/
 
 SAMSUNG FRAMEBUFFER DRIVER
 M:	Jingoo Han <jg1.han@samsung.com>
@@ -7191,10 +7206,11 @@
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/tty/serial
+F:	drivers/tty/serial/
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <viresh.linux@gmail.com>
+M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
 F:	include/linux/dw_dmac.h
 F:	drivers/dma/dw/
@@ -7225,7 +7241,7 @@
 M:	Huang Shijie <shijie8@gmail.com>
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 S:	Odd Fixes
-F:	drivers/media/usb/tlg2300
+F:	drivers/media/usb/tlg2300/
 
 SC1200 WDT DRIVER
 M:	Zwane Mwaikambo <zwane@arm.linux.org.uk>
@@ -7486,7 +7502,7 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/radio/radio-si4713.h
+F:	drivers/media/radio/radio-si4713.c
 
 SIANO DVB DRIVER
 M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
@@ -7495,9 +7511,8 @@
 T:	git git://linuxtv.org/media_tree.git
 S:	Odd fixes
 F:	drivers/media/common/siano/
-F:	drivers/media/dvb/siano/
 F:	drivers/media/usb/siano/
-F:	drivers/media/mmc/siano
+F:	drivers/media/mmc/siano/
 
 SH_VEU V4L2 MEM2MEM DRIVER
 M:	Guennadi Liakhovetski <g.liakhovetski@gmx.de>
@@ -7535,9 +7551,9 @@
 M:	Simtec Linux Team <linux@simtec.co.uk>
 W:	http://www.simtec.co.uk/products/EB2410ITX/
 S:	Supported
-F:	arch/arm/mach-s3c2410/mach-bast.c
-F:	arch/arm/mach-s3c2410/bast-ide.c
-F:	arch/arm/mach-s3c2410/bast-irq.c
+F:	arch/arm/mach-s3c24xx/mach-bast.c
+F:	arch/arm/mach-s3c24xx/bast-ide.c
+F:	arch/arm/mach-s3c24xx/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
@@ -7546,7 +7562,7 @@
 T:	git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:	http://patchwork.kernel.org/project/linux-davinci/list/
 S:	Supported
-F:	arch/arm/mach-davinci
+F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
 
 TI DAVINCI SERIES MEDIA DRIVER
@@ -7614,6 +7630,14 @@
 F:	Documentation/security/Smack.txt
 F:	security/smack/
 
+SMARTREFLEX DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
+M:	Kevin Hilman <khilman@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+S:	Maintained
+F:	drivers/power/avs/smartreflex.c
+F:	include/linux/power/smartreflex.h
+L:	linux-pm@vger.kernel.org
+
 SMC91x ETHERNET DRIVER
 M:	Nicolas Pitre <nico@fluxnic.net>
 S:	Odd Fixes
@@ -7623,7 +7647,7 @@
 M:	Sakari Ailus <sakari.ailus@iki.fi>
 L:	linux-media@vger.kernel.org
 S:	Maintained
-F:	drivers/media/i2c/smiapp
+F:	drivers/media/i2c/smiapp/
 F:	include/media/smiapp.h
 F:	drivers/media/i2c/smiapp-pll.c
 F:	drivers/media/i2c/smiapp-pll.h
@@ -7726,6 +7750,11 @@
 S:	Maintained
 F:	drivers/memstick/host/tifm_ms.c
 
+SONY MEMORYSTICK STANDARD SUPPORT
+M:	Maxim Levitsky <maximlevitsky@gmail.com>
+S:	Maintained
+F:	drivers/memstick/core/ms_block.*
+
 SOUND
 M:	Jaroslav Kysela <perex@perex.cz>
 M:	Takashi Iwai <tiwai@suse.de>
@@ -7802,35 +7831,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
 S:	Maintained
-F:	arch/arm/plat-spear/
-
-SPEAR13XX MACHINE SUPPORT
-M:	Viresh Kumar <viresh.linux@gmail.com>
-M:	Shiraz Hashim <shiraz.hashim@st.com>
-L:	spear-devel@list.st.com
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.st.com/spear
-S:	Maintained
-F:	arch/arm/mach-spear13xx/
-
-SPEAR3XX MACHINE SUPPORT
-M:	Viresh Kumar <viresh.linux@gmail.com>
-M:	Shiraz Hashim <shiraz.hashim@st.com>
-L:	spear-devel@list.st.com
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.st.com/spear
-S:	Maintained
-F:	arch/arm/mach-spear3xx/
-
-SPEAR6XX MACHINE SUPPORT
-M:	Rajeev Kumar <rajeev-dlh.kumar@st.com>
-M:	Shiraz Hashim <shiraz.hashim@st.com>
-M:	Viresh Kumar <viresh.linux@gmail.com>
-L:	spear-devel@list.st.com
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.st.com/spear
-S:	Maintained
-F:	arch/arm/mach-spear6xx/
+F:	arch/arm/mach-spear/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
 M:	Viresh Kumar <viresh.linux@gmail.com>
@@ -8099,7 +8100,7 @@
 S:	Supported
 F:	arch/arc/
 F:	Documentation/devicetree/bindings/arc/
-F:	drivers/tty/serial/arc-uart.c
+F:	drivers/tty/serial/arc_uart.c
 
 SYSV FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
@@ -8789,7 +8790,6 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:	Maintained
 F:	drivers/usb/phy/
-F:	drivers/usb/otg/
 
 USB PRINTER DRIVER (usblp)
 M:	Pete Zaitcev <zaitcev@redhat.com>
@@ -9320,7 +9320,7 @@
 L:	platform-driver-x86@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
 S:	Maintained
-F:	drivers/platform/x86
+F:	drivers/platform/x86/
 
 X86 MCE INFRASTRUCTURE
 M:	Tony Luck <tony.luck@intel.com>
diff --git a/Makefile b/Makefile
index a42f26a..e73f758 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 PATCHLEVEL = 11
 SUBLEVEL = 0
 EXTRAVERSION =
-NAME = Linux for Workgroups
+NAME = Suicidal Squirrel
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 40736da..ffb19b7 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -338,6 +338,11 @@
 	unsigned long doff = 7 & (unsigned long) dst;
 
 	if (len) {
+		if (!access_ok(VERIFY_READ, src, len)) {
+			*errp = -EFAULT;
+			memset(dst, 0, len);
+			return sum;
+		}
 		if (!doff) {
 			if (!soff)
 				checksum = csum_partial_cfu_aligned(
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index bdee3a8..2340af0 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -18,12 +18,6 @@
 #include <asm/clk.h>
 #include <asm/mach_desc.h>
 
-/* called from unflatten_device_tree() to bootstrap devicetree itself */
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
-	return __va(memblock_alloc(size, align));
-}
-
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt:		virtual address pointer to dt blob
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index a08ce71..81279ec 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -127,9 +127,8 @@
 #endif
 
 #ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
-	pr_err("%s(%lx, %lx)\n", __func__, start, end);
+	pr_err("%s(%llx, %llx)\n", __func__, start, end);
 }
 #endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a00f4c1..c8a916f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -557,6 +557,7 @@
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
 	select NEED_MACH_GPIO_H
 	select PINCTRL
 	select PLAT_PXA
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 000cf76..cc0f1fb 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -198,12 +198,16 @@
 	emev2-kzm9d-reference.dtb \
 	r8a7740-armadillo800eva.dtb \
 	r8a7778-bockw.dtb \
+	r8a7778-bockw-reference.dtb \
 	r8a7740-armadillo800eva-reference.dtb \
+	r8a7779-marzen.dtb \
 	r8a7779-marzen-reference.dtb \
 	r8a7790-lager.dtb \
+	r8a7790-lager-reference.dtb \
 	sh73a0-kzm9g.dtb \
 	sh73a0-kzm9g-reference.dtb \
 	r8a73a4-ape6evm.dtb \
+	r8a73a4-ape6evm-reference.dtb \
 	sh7372-mackerel.dtb
 dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += emev2-kzm9d-reference.dtb
 dtb-$(CONFIG_ARCH_SOCFPGA) += socfpga_cyclone5.dtb \
@@ -227,6 +231,7 @@
 	sun5i-a10s-olinuxino-micro.dtb \
 	sun5i-a13-olinuxino.dtb \
 	sun6i-a31-colombus.dtb \
+	sun7i-a20-cubieboard2.dtb \
 	sun7i-a20-olinuxino-micro.dtb
 dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
 	tegra20-iris-512.dtb \
diff --git a/arch/arm/boot/dts/emev2-kzm9d-reference.dts b/arch/arm/boot/dts/emev2-kzm9d-reference.dts
index bed676b..cceefda 100644
--- a/arch/arm/boot/dts/emev2-kzm9d-reference.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d-reference.dts
@@ -21,7 +21,7 @@
 	};
 
 	chosen {
-		bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096";
+		bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp";
 	};
 
 	reg_1p8v: regulator@0 {
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index dda13bc..f92e812 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -21,6 +21,6 @@
 	};
 
 	chosen {
-		bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096";
+		bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp";
 	};
 };
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index 99ad2b2..9063a443 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -46,6 +46,12 @@
 		      <0xe0020000 0x0100>;
 	};
 
+	pmu {
+		compatible = "arm,cortex-a9-pmu";
+		interrupts = <0 120 4>,
+			     <0 121 4>;
+	};
+
 	sti@e0180000 {
 		compatible = "renesas,em-sti";
 		reg = <0xe0180000 0x54>;
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 93c2501..caadc02 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -448,6 +448,8 @@
 		compatible = "samsung,exynos4210-pwm";
 		reg = <0x139D0000 0x1000>;
 		interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
+		clocks = <&clock 336>;
+		clock-names = "timers";
 		#pwm-cells = <2>;
 		status = "disabled";
 	};
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 6afa57d..074739d 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -95,7 +95,7 @@
 		interrupts = <0 54 0>;
 	};
 
-	rtc {
+	rtc@101E0000 {
 		compatible = "samsung,s3c6410-rtc";
 		reg = <0x101E0000 0x100>;
 		interrupts = <0 43 0>, <0 44 0>;
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 452d0b0..cee55fa 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -538,10 +538,6 @@
 		};
 	};
 
-	rtc {
-		status = "okay";
-	};
-
 	usb_hub_bus {
 		compatible = "simple-bus";
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index e79331d..fd711e2 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -171,10 +171,6 @@
 		};
 	};
 
-	rtc {
-		status = "okay";
-	};
-
 	/*
 	 * On Snow we've got SIP WiFi and so can keep drive strengths low to
 	 * reduce EMI.
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f7e2d34..7d7cc77 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -180,9 +180,10 @@
 		clock-names = "mfc";
 	};
 
-	rtc {
+	rtc@101E0000 {
 		clocks = <&clock 337>;
 		clock-names = "rtc";
+		status = "okay";
 	};
 
 	tmu@10060000 {
@@ -638,4 +639,15 @@
 		clocks = <&clock 133>, <&clock 339>;
 		clock-names = "sclk_fimd", "fimd";
 	};
+
+	adc: adc@12D10000 {
+		compatible = "samsung,exynos-adc-v1";
+		reg = <0x12D10000 0x100>, <0x10040718 0x4>;
+		interrupts = <0 106 0>;
+		clocks = <&clock 303>;
+		clock-names = "adc";
+		#io-channel-cells = <1>;
+		io-channel-ranges;
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 5353e32..d537cd7 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -180,6 +180,12 @@
 		interrupts = <0 47 0>;
 	};
 
+	rtc@101E0000 {
+		clocks = <&clock 317>;
+		clock-names = "rtc";
+		status = "okay";
+	};
+
 	serial@12C00000 {
 		clocks = <&clock 257>, <&clock 128>;
 		clock-names = "uart", "clk_uart_baud0";
@@ -218,4 +224,15 @@
 		clocks = <&clock 147>, <&clock 421>;
 		clock-names = "sclk_fimd", "fimd";
 	};
+
+	adc: adc@12D10000 {
+		compatible = "samsung,exynos-adc-v2";
+		reg = <0x12D10000 0x100>, <0x10040720 0x4>;
+		interrupts = <0 106 0>;
+		clocks = <&clock 270>;
+		clock-names = "adc";
+		#io-channel-cells = <1>;
+		io-channel-ranges;
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts b/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
new file mode 100644
index 0000000..f444624
--- /dev/null
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
@@ -0,0 +1,65 @@
+/*
+ * Device Tree Source for the APE6EVM board
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "r8a73a4.dtsi"
+
+/ {
+	model = "APE6EVM";
+	compatible = "renesas,ape6evm-reference", "renesas,r8a73a4";
+
+	chosen {
+		bootargs = "console=ttySC0,115200 ignore_loglevel rw";
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+
+	lbsc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0x80000000>;
+	};
+};
+
+&i2c5 {
+	vdd_dvfs: max8973@1b {
+		compatible = "maxim,max8973";
+		reg = <0x1b>;
+
+		regulator-min-microvolt = <935000>;
+		regulator-max-microvolt = <1200000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+};
+
+&cpu0 {
+	cpu0-supply = <&vdd_dvfs>;
+	operating-points = <
+		/* kHz  uV */
+		1950000 1115000
+		1462500  995000
+	>;
+	voltage-tolerance = <1>; /* 1% */
+};
+
+&pfc {
+	pinctrl-0 = <&scifa0_pins>;
+	pinctrl-names = "default";
+
+	scifa0_pins: scifa0 {
+		renesas,groups = "scifa0_data";
+		renesas,function = "scifa0";
+	};
+};
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index e657a9d..72f867e 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -16,7 +16,7 @@
 	compatible = "renesas,ape6evm", "renesas,r8a73a4";
 
 	chosen {
-		bootargs = "console=ttySC0,115200 ignore_loglevel root=/dev/nfs ip=dhcp";
+		bootargs = "console=ttySC0,115200 ignore_loglevel root=/dev/nfs ip=dhcp rw";
 	};
 
 	memory@40000000 {
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts
index 366f729..c638e4a 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts
@@ -17,7 +17,7 @@
 	compatible = "renesas,armadillo800eva-reference", "renesas,r8a7740";
 
 	chosen {
-		bootargs = "console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw";
+		bootargs = "console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp rw";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 93da655..426cd9c 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -16,7 +16,7 @@
 	compatible = "renesas,armadillo800eva";
 
 	chosen {
-		bootargs = "console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw";
+		bootargs = "console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp rw";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index e18a195..44d3d52 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -32,6 +32,11 @@
 		      <0xc2000000 0x1000>;
 	};
 
+	pmu {
+		compatible = "arm,cortex-a9-pmu";
+		interrupts = <0 83 4>;
+	};
+
 	/* irqpin0: IRQ0 - IRQ7 */
 	irqpin0: irqpin@e6900000 {
 		compatible = "renesas,intc-irqpin";
@@ -147,4 +152,11 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 	};
+
+	tpu: pwm@e6600000 {
+		compatible = "renesas,tpu-r8a7740", "renesas,tpu";
+		reg = <0xe6600000 0x100>;
+		status = "disabled";
+		#pwm-cells = <3>;
+	};
 };
diff --git a/arch/arm/boot/dts/r8a7778-bockw-reference.dts b/arch/arm/boot/dts/r8a7778-bockw-reference.dts
new file mode 100644
index 0000000..9bb903a
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7778-bockw-reference.dts
@@ -0,0 +1,32 @@
+/*
+ * Reference Device Tree Source for the Bock-W board
+ *
+ * Copyright (C) 2013  Renesas Solutions Corp.
+ * Copyright (C) 2013  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on r8a7779
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Simon Horman
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "r8a7778.dtsi"
+
+/ {
+	model = "bockw";
+	compatible = "renesas,bockw-reference", "renesas,r8a7778";
+
+	chosen {
+		bootargs = "console=ttySC0,115200 ignore_loglevel rw";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x60000000 0x10000000>;
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7778-bockw.dts b/arch/arm/boot/dts/r8a7778-bockw.dts
index 0076b1e..12bbebc 100644
--- a/arch/arm/boot/dts/r8a7778-bockw.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw.dts
@@ -22,7 +22,7 @@
 	compatible = "renesas,bockw", "renesas,r8a7778";
 
 	chosen {
-		bootargs = "console=ttySC0,115200 ignore_loglevel ip=dhcp root=/dev/nfs";
+		bootargs = "console=ttySC0,115200 ignore_loglevel ip=dhcp root=/dev/nfs rw";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/r8a7779-marzen-reference.dts b/arch/arm/boot/dts/r8a7779-marzen-reference.dts
index b64705b..6d55083 100644
--- a/arch/arm/boot/dts/r8a7779-marzen-reference.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen-reference.dts
@@ -18,7 +18,7 @@
 	compatible = "renesas,marzen-reference", "renesas,r8a7779";
 
 	chosen {
-		bootargs = "console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel root=/dev/nfs ip=on";
+		bootargs = "console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel root=/dev/nfs ip=on rw";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
new file mode 100644
index 0000000..f3f7f79
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -0,0 +1,27 @@
+/*
+ * Device Tree Source for the Marzen board
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Simon Horman
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "r8a7779.dtsi"
+
+/ {
+	model = "marzen";
+	compatible = "renesas,marzen", "renesas,r8a7779";
+
+	chosen {
+		bootargs = "console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel root=/dev/nfs ip=on";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x60000000 0x40000000>;
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index e9fbe3d..23a6244 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -149,7 +149,7 @@
 		sense-bitfield-width = <2>;
 	};
 
-	i2c0: i2c@0xffc70000 {
+	i2c0: i2c@ffc70000 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		compatible = "renesas,rmobile-iic";
@@ -158,7 +158,7 @@
 		interrupts = <0 79 0x4>;
 	};
 
-	i2c1: i2c@0xffc71000 {
+	i2c1: i2c@ffc71000 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		compatible = "renesas,rmobile-iic";
@@ -167,7 +167,7 @@
 		interrupts = <0 82 0x4>;
 	};
 
-	i2c2: i2c@0xffc72000 {
+	i2c2: i2c@ffc72000 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		compatible = "renesas,rmobile-iic";
@@ -176,7 +176,7 @@
 		interrupts = <0 80 0x4>;
 	};
 
-	i2c3: i2c@0xffc73000 {
+	i2c3: i2c@ffc73000 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		compatible = "renesas,rmobile-iic";
diff --git a/arch/arm/boot/dts/r8a7790-lager-reference.dts b/arch/arm/boot/dts/r8a7790-lager-reference.dts
new file mode 100644
index 0000000..c462ef1
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7790-lager-reference.dts
@@ -0,0 +1,45 @@
+/*
+ * Device Tree Source for the Lager board
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "r8a7790.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Lager";
+	compatible = "renesas,lager-reference", "renesas,r8a7790";
+
+	chosen {
+		bootargs = "console=ttySC6,115200 ignore_loglevel rw";
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x80000000>;
+	};
+
+	lbsc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		led6 {
+			gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+		};
+		led7 {
+			gpios = <&gpio4 23 GPIO_ACTIVE_HIGH>;
+		};
+		led8 {
+			gpios = <&gpio5 17 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 09a84fc..203bd08 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -16,7 +16,7 @@
 	compatible = "renesas,lager", "renesas,r8a7790";
 
 	chosen {
-		bootargs = "console=ttySC6,115200 ignore_loglevel";
+		bootargs = "console=ttySC6,115200 ignore_loglevel rw root=/dev/nfs ip=dhcp";
 	};
 
 	memory@40000000 {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index ff63fbb..b7f4961 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1034,21 +1034,30 @@
 			compatible = "atmel,at91rm9200-nand";
 			#address-cells = <1>;
 			#size-cells = <1>;
+			ranges;
 			reg = <	0x60000000 0x01000000	/* EBI CS3 */
 				0xffffc070 0x00000490	/* SMC PMECC regs */
 				0xffffc500 0x00000100	/* SMC PMECC Error Location regs */
-				0x00100000 0x00100000	/* ROM code */
-				0x70000000 0x10000000	/* NFC Command Registers */
-				0xffffc000 0x00000070	/* NFC HSMC regs */
-				0x00200000 0x00100000	/* NFC SRAM banks */
+				0x00110000 0x00018000	/* ROM code */
 				>;
 			interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
 			atmel,nand-addr-offset = <21>;
 			atmel,nand-cmd-offset = <22>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_nand0_ale_cle>;
-			atmel,pmecc-lookup-table-offset = <0x10000 0x18000>;
+			atmel,pmecc-lookup-table-offset = <0x0 0x8000>;
 			status = "disabled";
+
+			nfc@70000000 {
+				compatible = "atmel,sama5d3-nfc";
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <
+					0x70000000 0x10000000	/* NFC Command Registers */
+					0xffffc000 0x00000070	/* NFC HSMC regs */
+					0x00200000 0x00100000	/* NFC SRAM banks */
+					>;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 1f80508..31ed9e3 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -47,8 +47,6 @@
 			atmel,has-pmecc;
 			atmel,pmecc-cap = <4>;
 			atmel,pmecc-sector-size = <512>;
-			atmel,has-nfc;
-			atmel,use-nfc-sram;
 			nand-on-flash-bbt;
 			status = "okay";
 
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts b/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
index b99e890..2122306 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
@@ -33,7 +33,7 @@
 	};
 
 	chosen {
-		bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel earlyprintk=sh-sci.4,115200";
+		bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel earlyprintk=sh-sci.4,115200 rw";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g.dts b/arch/arm/boot/dts/sh73a0-kzm9g.dts
index 7c4071e..0f1ca77 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g.dts
@@ -16,7 +16,7 @@
 	compatible = "renesas,kzm9g", "renesas,sh73a0";
 
 	chosen {
-		bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel earlyprintk=sh-sci.4,115200";
+		bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel earlyprintk=sh-sci.4,115200 rw";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 86e79fe..ba59a58 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -38,6 +38,12 @@
 		      <0xf0000100 0x100>;
 	};
 
+	pmu {
+		compatible = "arm,cortex-a9-pmu";
+		interrupts = <0 55 4>,
+			     <0 56 4>;
+	};
+
 	irqpin0: irqpin@e6900000 {
 		compatible = "renesas,intc-irqpin";
 		#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ee0ff9b..3b4a057 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -95,20 +95,16 @@
 
 		ahb_gates: ahb_gates@01c20060 {
 			#clock-cells = <1>;
-			compatible = "allwinner,sun4i-ahb-gates-clk";
+			compatible = "allwinner,sun5i-a10s-ahb-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb>;
-			clock-output-names = "ahb_usb0", "ahb_ehci0",
-				"ahb_ohci0", "ahb_ehci1", "ahb_ohci1", "ahb_ss",
-				"ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1",
-				"ahb_mmc2", "ahb_mmc3", "ahb_ms", "ahb_nand",
-				"ahb_sdram", "ahb_ace",	"ahb_emac", "ahb_ts",
-				"ahb_spi0", "ahb_spi1", "ahb_spi2", "ahb_spi3",
-				"ahb_pata", "ahb_sata", "ahb_gps", "ahb_ve",
-				"ahb_tvd", "ahb_tve0", "ahb_tve1", "ahb_lcd0",
-				"ahb_lcd1", "ahb_csi0", "ahb_csi1", "ahb_hdmi",
-				"ahb_de_be0", "ahb_de_be1", "ahb_de_fe0",
-				"ahb_de_fe1", "ahb_mp", "ahb_mali400";
+			clock-output-names = "ahb_usbotg", "ahb_ehci", "ahb_ohci",
+				"ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0",
+				"ahb_mmc1", "ahb_mmc2", "ahb_nand", "ahb_sdram",
+				"ahb_emac", "ahb_ts", "ahb_spi0", "ahb_spi1",
+				"ahb_spi2", "ahb_gps", "ahb_stimer", "ahb_ve",
+				"ahb_tve", "ahb_lcd", "ahb_csi", "ahb_hdmi",
+				"ahb_de_be", "ahb_de_fe", "ahb_iep", "ahb_mali400";
 		};
 
 		apb0: apb0@01c20054 {
@@ -120,12 +116,11 @@
 
 		apb0_gates: apb0_gates@01c20068 {
 			#clock-cells = <1>;
-			compatible = "allwinner,sun4i-apb0-gates-clk";
+			compatible = "allwinner,sun5i-a10s-apb0-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb0>;
-			clock-output-names = "apb0_codec", "apb0_spdif",
-				"apb0_ac97", "apb0_iis", "apb0_pio", "apb0_ir0",
-				"apb0_ir1", "apb0_keypad";
+			clock-output-names = "apb0_codec", "apb0_iis", "apb0_pio",
+				"apb0_ir", "apb0_keypad";
 		};
 
 		/* dummy is pll62 */
@@ -145,15 +140,12 @@
 
 		apb1_gates: apb1_gates@01c2006c {
 			#clock-cells = <1>;
-			compatible = "allwinner,sun4i-apb1-gates-clk";
+			compatible = "allwinner,sun5i-a10s-apb1-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb1>;
 			clock-output-names = "apb1_i2c0", "apb1_i2c1",
-				"apb1_i2c2", "apb1_can", "apb1_scr",
-				"apb1_ps20", "apb1_ps21", "apb1_uart0",
-				"apb1_uart1", "apb1_uart2", "apb1_uart3",
-				"apb1_uart4", "apb1_uart5", "apb1_uart6",
-				"apb1_uart7";
+				"apb1_i2c2", "apb1_uart0", "apb1_uart1",
+				"apb1_uart2", "apb1_uart3";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/sun6i-a31-colombus.dts b/arch/arm/boot/dts/sun6i-a31-colombus.dts
index 99c4b18..e5adae3 100644
--- a/arch/arm/boot/dts/sun6i-a31-colombus.dts
+++ b/arch/arm/boot/dts/sun6i-a31-colombus.dts
@@ -24,6 +24,8 @@
 
 	soc@01c00000 {
 		uart0: serial@01c28000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_pins_a>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 4d076ec..f244f5f 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -51,13 +51,137 @@
 
 	clocks {
 		#address-cells = <1>;
-		#size-cells = <0>;
+		#size-cells = <1>;
+		ranges;
 
-		osc: oscillator {
+		osc24M: osc24M {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <24000000>;
 		};
+
+		osc32k: osc32k {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <32768>;
+		};
+
+		pll1: pll1@01c20000 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun6i-a31-pll1-clk";
+			reg = <0x01c20000 0x4>;
+			clocks = <&osc24M>;
+		};
+
+		/*
+		 * This is a dummy clock, to be used as placeholder on
+		 * other mux clocks when a specific parent clock is not
+		 * yet implemented. It should be dropped when the driver
+		 * is complete.
+		 */
+		pll6: pll6 {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <0>;
+		};
+
+		cpu: cpu@01c20050 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-cpu-clk";
+			reg = <0x01c20050 0x4>;
+
+			/*
+			 * PLL1 is listed twice here.
+			 * While it looks suspicious, it's actually documented
+			 * that way both in the datasheet and in the code from
+			 * Allwinner.
+			 */
+			clocks = <&osc32k>, <&osc24M>, <&pll1>, <&pll1>;
+		};
+
+		axi: axi@01c20050 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-axi-clk";
+			reg = <0x01c20050 0x4>;
+			clocks = <&cpu>;
+		};
+
+		ahb1_mux: ahb1_mux@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
+		};
+
+		ahb1: ahb1@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-ahb-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&ahb1_mux>;
+		};
+
+		ahb1_gates: ahb1_gates@01c20060 {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun6i-a31-ahb1-gates-clk";
+			reg = <0x01c20060 0x8>;
+			clocks = <&ahb1>;
+			clock-output-names = "ahb1_mipidsi", "ahb1_ss",
+					"ahb1_dma", "ahb1_mmc0", "ahb1_mmc1",
+					"ahb1_mmc2", "ahb1_mmc3", "ahb1_nand1",
+					"ahb1_nand0", "ahb1_sdram",
+					"ahb1_gmac", "ahb1_ts", "ahb1_hstimer",
+					"ahb1_spi0", "ahb1_spi1", "ahb1_spi2",
+					"ahb1_spi3", "ahb1_otg", "ahb1_ehci0",
+					"ahb1_ehci1", "ahb1_ohci0",
+					"ahb1_ohci1", "ahb1_ohci2", "ahb1_ve",
+					"ahb1_lcd0", "ahb1_lcd1", "ahb1_csi",
+					"ahb1_hdmi", "ahb1_de0", "ahb1_de1",
+					"ahb1_fe0", "ahb1_fe1", "ahb1_mp",
+					"ahb1_gpu", "ahb1_deu0", "ahb1_deu1",
+					"ahb1_drc0", "ahb1_drc1";
+		};
+
+		apb1: apb1@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-apb0-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&ahb1>;
+		};
+
+		apb1_gates: apb1_gates@01c20060 {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun6i-a31-apb1-gates-clk";
+			reg = <0x01c20068 0x4>;
+			clocks = <&apb1>;
+			clock-output-names = "apb1_codec", "apb1_digital_mic",
+					"apb1_pio", "apb1_daudio0",
+					"apb1_daudio1";
+		};
+
+		apb2_mux: apb2_mux@01c20058 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-apb1-mux-clk";
+			reg = <0x01c20058 0x4>;
+			clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
+		};
+
+		apb2: apb2@01c20058 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun6i-a31-apb2-div-clk";
+			reg = <0x01c20058 0x4>;
+			clocks = <&apb2_mux>;
+		};
+
+		apb2_gates: apb2_gates@01c2006c {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun6i-a31-apb2-gates-clk";
+			reg = <0x01c2006c 0x8>;
+			clocks = <&apb2>;
+			clock-output-names = "apb2_i2c0", "apb2_i2c1",
+					"apb2_i2c2", "apb2_i2c3", "apb2_uart0",
+					"apb2_uart1", "apb2_uart2", "apb2_uart3",
+					"apb2_uart4", "apb2_uart5";
+		};
 	};
 
 	soc@01c00000 {
@@ -66,6 +190,25 @@
 		#size-cells = <1>;
 		ranges;
 
+		pio: pinctrl@01c20800 {
+			compatible = "allwinner,sun6i-a31-pinctrl";
+			reg = <0x01c20800 0x400>;
+			interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>;
+			clocks = <&apb1_gates 5>;
+			gpio-controller;
+			interrupt-controller;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#gpio-cells = <3>;
+
+			uart0_pins_a: uart0@0 {
+				allwinner,pins = "PH20", "PH21";
+				allwinner,function = "uart0";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+
 		timer@01c20c00 {
 			compatible = "allwinner,sun4i-timer";
 			reg = <0x01c20c00 0xa0>;
@@ -74,7 +217,7 @@
 				     <0 20 1>,
 				     <0 21 1>,
 				     <0 22 1>;
-			clocks = <&osc>;
+			clocks = <&osc24M>;
 		};
 
 		wdt1: watchdog@01c20ca0 {
@@ -88,7 +231,7 @@
 			interrupts = <0 0 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc>;
+			clocks = <&apb2_gates 16>;
 			status = "disabled";
 		};
 
@@ -98,7 +241,7 @@
 			interrupts = <0 1 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc>;
+			clocks = <&apb2_gates 17>;
 			status = "disabled";
 		};
 
@@ -108,7 +251,7 @@
 			interrupts = <0 2 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc>;
+			clocks = <&apb2_gates 18>;
 			status = "disabled";
 		};
 
@@ -118,7 +261,7 @@
 			interrupts = <0 3 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc>;
+			clocks = <&apb2_gates 19>;
 			status = "disabled";
 		};
 
@@ -128,7 +271,7 @@
 			interrupts = <0 4 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc>;
+			clocks = <&apb2_gates 20>;
 			status = "disabled";
 		};
 
@@ -138,7 +281,7 @@
 			interrupts = <0 5 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc>;
+			clocks = <&apb2_gates 21>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
new file mode 100644
index 0000000..31b76f0
--- /dev/null
+++ b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "sun7i-a20.dtsi"
+
+/ {
+	model = "Cubietech Cubieboard2";
+	compatible = "cubietech,cubieboard2", "allwinner,sun7i-a20";
+
+	soc@01c00000 {
+		pinctrl@01c20800 {
+			led_pins_cubieboard2: led_pins@0 {
+				allwinner,pins = "PH20", "PH21";
+				allwinner,function = "gpio_out";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+
+		uart0: serial@01c28000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_pins_a>;
+			status = "okay";
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&led_pins_cubieboard2>;
+
+		blue {
+			label = "cubieboard2:blue:usr";
+			gpios = <&pio 7 21 0>;
+		};
+
+		green {
+			label = "cubieboard2:green:usr";
+			gpios = <&pio 7 20 0>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index d339584..34a6c02 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -19,16 +19,43 @@
 	compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
 	soc@01c00000 {
+		pinctrl@01c20800 {
+			led_pins_olinuxino: led_pins@0 {
+				allwinner,pins = "PH2";
+				allwinner,function = "gpio_out";
+				allwinner,drive = <1>;
+				allwinner,pull = <0>;
+			};
+		};
+
 		uart0: serial@01c28000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_pins_a>;
 			status = "okay";
 		};
 
 		uart6: serial@01c29800 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart6_pins_a>;
 			status = "okay";
 		};
 
 		uart7: serial@01c29c00 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart7_pins_a>;
 			status = "okay";
 		};
 	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&led_pins_olinuxino>;
+
+		green {
+			label = "a20-olinuxino-micro:green:usr";
+			gpios = <&pio 7 2 0>;
+			default-state = "on";
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3339151..999ff45 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -44,7 +44,8 @@
 
 		osc24M: osc24M@01c20050 {
 			#clock-cells = <0>;
-			compatible = "fixed-clock";
+			compatible = "allwinner,sun4i-osc-clk";
+			reg = <0x01c20050 0x4>;
 			clock-frequency = <24000000>;
 		};
 
@@ -53,6 +54,111 @@
 			compatible = "fixed-clock";
 			clock-frequency = <32768>;
 		};
+
+		pll1: pll1@01c20000 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-pll1-clk";
+			reg = <0x01c20000 0x4>;
+			clocks = <&osc24M>;
+		};
+
+		/*
+		 * This is a dummy clock, to be used as placeholder on
+		 * other mux clocks when a specific parent clock is not
+		 * yet implemented. It should be dropped when the driver
+		 * is complete.
+		 */
+		pll6: pll6 {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <0>;
+		};
+
+		cpu: cpu@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-cpu-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&osc32k>, <&osc24M>, <&pll1>, <&pll6>;
+		};
+
+		axi: axi@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-axi-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&cpu>;
+		};
+
+		ahb: ahb@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-ahb-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&axi>;
+		};
+
+		ahb_gates: ahb_gates@01c20060 {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun7i-a20-ahb-gates-clk";
+			reg = <0x01c20060 0x8>;
+			clocks = <&ahb>;
+			clock-output-names = "ahb_usb0", "ahb_ehci0",
+				"ahb_ohci0", "ahb_ehci1", "ahb_ohci1",
+				"ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0",
+				"ahb_mmc1", "ahb_mmc2", "ahb_mmc3", "ahb_ms",
+				"ahb_nand", "ahb_sdram", "ahb_ace",
+				"ahb_emac", "ahb_ts", "ahb_spi0", "ahb_spi1",
+				"ahb_spi2", "ahb_spi3", "ahb_sata",
+				"ahb_hstimer", "ahb_ve", "ahb_tvd", "ahb_tve0",
+				"ahb_tve1", "ahb_lcd0", "ahb_lcd1", "ahb_csi0",
+				"ahb_csi1", "ahb_hdmi1", "ahb_hdmi0",
+				"ahb_de_be0", "ahb_de_be1", "ahb_de_fe0",
+				"ahb_de_fe1", "ahb_gmac", "ahb_mp",
+				"ahb_mali";
+		};
+
+		apb0: apb0@01c20054 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-apb0-clk";
+			reg = <0x01c20054 0x4>;
+			clocks = <&ahb>;
+		};
+
+		apb0_gates: apb0_gates@01c20068 {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun7i-a20-apb0-gates-clk";
+			reg = <0x01c20068 0x4>;
+			clocks = <&apb0>;
+			clock-output-names = "apb0_codec", "apb0_spdif",
+				"apb0_ac97", "apb0_iis0", "apb0_iis1",
+				"apb0_pio", "apb0_ir0", "apb0_ir1",
+				"apb0_iis2", "apb0_keypad";
+		};
+
+		apb1_mux: apb1_mux@01c20058 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-apb1-mux-clk";
+			reg = <0x01c20058 0x4>;
+			clocks = <&osc24M>, <&pll6>, <&osc32k>;
+		};
+
+		apb1: apb1@01c20058 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-apb1-clk";
+			reg = <0x01c20058 0x4>;
+			clocks = <&apb1_mux>;
+		};
+
+		apb1_gates: apb1_gates@01c2006c {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun7i-a20-apb1-gates-clk";
+			reg = <0x01c2006c 0x4>;
+			clocks = <&apb1>;
+			clock-output-names = "apb1_i2c0", "apb1_i2c1",
+				"apb1_i2c2", "apb1_i2c3", "apb1_can",
+				"apb1_scr", "apb1_ps20", "apb1_ps21",
+				"apb1_i2c4", "apb1_uart0", "apb1_uart1",
+				"apb1_uart2", "apb1_uart3", "apb1_uart4",
+				"apb1_uart5", "apb1_uart6", "apb1_uart7";
+		};
 	};
 
 	soc@01c00000 {
@@ -61,6 +167,39 @@
 		#size-cells = <1>;
 		ranges;
 
+		pio: pinctrl@01c20800 {
+			compatible = "allwinner,sun7i-a20-pinctrl";
+			reg = <0x01c20800 0x400>;
+			interrupts = <0 28 1>;
+			clocks = <&apb0_gates 5>;
+			gpio-controller;
+			interrupt-controller;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#gpio-cells = <3>;
+
+			uart0_pins_a: uart0@0 {
+				allwinner,pins = "PB22", "PB23";
+				allwinner,function = "uart0";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart6_pins_a: uart6@0 {
+				allwinner,pins = "PI12", "PI13";
+				allwinner,function = "uart6";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart7_pins_a: uart7@0 {
+				allwinner,pins = "PI20", "PI21";
+				allwinner,function = "uart7";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+
 		timer@01c20c00 {
 			compatible = "allwinner,sun4i-timer";
 			reg = <0x01c20c00 0x90>;
@@ -84,7 +223,7 @@
 			interrupts = <0 1 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 16>;
 			status = "disabled";
 		};
 
@@ -94,7 +233,7 @@
 			interrupts = <0 2 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 17>;
 			status = "disabled";
 		};
 
@@ -104,7 +243,7 @@
 			interrupts = <0 3 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 18>;
 			status = "disabled";
 		};
 
@@ -114,7 +253,7 @@
 			interrupts = <0 4 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 19>;
 			status = "disabled";
 		};
 
@@ -124,7 +263,7 @@
 			interrupts = <0 17 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 20>;
 			status = "disabled";
 		};
 
@@ -134,7 +273,7 @@
 			interrupts = <0 18 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 21>;
 			status = "disabled";
 		};
 
@@ -144,7 +283,7 @@
 			interrupts = <0 19 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 22>;
 			status = "disabled";
 		};
 
@@ -154,7 +293,7 @@
 			interrupts = <0 20 1>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&apb1_gates 23>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 759b0cd..15f98cb 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -37,30 +37,35 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a15";
 			reg = <0>;
+			cci-control-port = <&cci_control1>;
 		};
 
 		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a15";
 			reg = <1>;
+			cci-control-port = <&cci_control1>;
 		};
 
 		cpu2: cpu@2 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a7";
 			reg = <0x100>;
+			cci-control-port = <&cci_control2>;
 		};
 
 		cpu3: cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a7";
 			reg = <0x101>;
+			cci-control-port = <&cci_control2>;
 		};
 
 		cpu4: cpu@4 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a7";
 			reg = <0x102>;
+			cci-control-port = <&cci_control2>;
 		};
 	};
 
@@ -104,6 +109,26 @@
 		interrupts = <1 9 0xf04>;
 	};
 
+	cci@2c090000 {
+		compatible = "arm,cci-400";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0 0x2c090000 0 0x1000>;
+		ranges = <0x0 0x0 0x2c090000 0x10000>;
+
+		cci_control1: slave-if@4000 {
+			compatible = "arm,cci-400-ctrl-if";
+			interface-type = "ace";
+			reg = <0x4000 0x1000>;
+		};
+
+		cci_control2: slave-if@5000 {
+			compatible = "arm,cci-400-ctrl-if";
+			interface-type = "ace";
+			reg = <0x5000 0x1000>;
+		};
+	};
+
 	memory-controller@7ffd0000 {
 		compatible = "arm,pl354", "arm,primecell";
 		reg = <0 0x7ffd0000 0 0x1000>;
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 39ad030..117f955 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1235,6 +1235,23 @@
 }
 EXPORT_SYMBOL(edma_resume);
 
+int edma_trigger_channel(unsigned channel)
+{
+	unsigned ctlr;
+	unsigned int mask;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+	mask = BIT(channel & 0x1f);
+
+	edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);
+
+	pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
+		 edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
+	return 0;
+}
+EXPORT_SYMBOL(edma_trigger_channel);
+
 /**
  * edma_start - start dma on a channel
  * @channel: channel being activated
diff --git a/arch/arm/configs/ag5evm_defconfig b/arch/arm/configs/ag5evm_defconfig
deleted file mode 100644
index 212ead3..0000000
--- a/arch/arm/configs/ag5evm_defconfig
+++ /dev/null
@@ -1,83 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE=y
-CONFIG_ARCH_SH73A0=y
-CONFIG_MACH_AG5EVM=y
-CONFIG_MEMORY_SIZE=0x10000000
-CONFIG_CPU_BPREDICT_DISABLE=y
-CONFIG_ARM_ERRATA_430973=y
-CONFIG_ARM_ERRATA_458693=y
-CONFIG_NO_HZ=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_HIGHMEM=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel"
-CONFIG_CMDLINE_FORCE=y
-CONFIG_KEXEC=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-# CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_INPUT_SPARSEKMAP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=9
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_SH_MOBILE=y
-# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
-CONFIG_FB=y
-CONFIG_FB_SH_MOBILE_LCDC=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY_USER is not set
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_FTRACE is not set
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 75fd842..690e892 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -14,11 +14,13 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_AT91=y
+CONFIG_SOC_AT91RM9200=y
 CONFIG_SOC_AT91SAM9260=y
 CONFIG_SOC_AT91SAM9263=y
 CONFIG_SOC_AT91SAM9G45=y
 CONFIG_SOC_AT91SAM9X5=y
 CONFIG_SOC_AT91SAM9N12=y
+CONFIG_MACH_AT91RM9200_DT=y
 CONFIG_MACH_AT91SAM9_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 CONFIG_AT91_TIMER_HZ=128
@@ -62,6 +64,7 @@
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
+CONFIG_MTD_DATAFLASH=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ATMEL=y
 CONFIG_MTD_UBI=y
@@ -78,7 +81,6 @@
 CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
-CONFIG_MII=y
 CONFIG_MACB=y
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_FARADAY is not set
diff --git a/arch/arm/configs/kota2_defconfig b/arch/arm/configs/kota2_defconfig
deleted file mode 100644
index 57ad3d4..0000000
--- a/arch/arm/configs/kota2_defconfig
+++ /dev/null
@@ -1,121 +0,0 @@
-# CONFIG_ARM_PATCH_PHYS_VIRT is not set
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_CGROUPS=y
-CONFIG_CPUSETS=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE=y
-CONFIG_KEYBOARD_GPIO_POLLED=y
-CONFIG_ARCH_SH73A0=y
-CONFIG_MACH_KOTA2=y
-CONFIG_MEMORY_SIZE=0x1e000000
-# CONFIG_SH_TIMER_TMU is not set
-# CONFIG_SWP_EMULATE is not set
-CONFIG_CPU_BPREDICT_DISABLE=y
-CONFIG_ARM_ERRATA_460075=y
-CONFIG_ARM_ERRATA_742230=y
-CONFIG_ARM_ERRATA_742231=y
-CONFIG_PL310_ERRATA_588369=y
-CONFIG_ARM_ERRATA_720789=y
-CONFIG_PL310_ERRATA_727915=y
-CONFIG_ARM_ERRATA_743622=y
-CONFIG_ARM_ERRATA_751472=y
-CONFIG_PL310_ERRATA_753970=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_PL310_ERRATA_769419=y
-CONFIG_NO_HZ=y
-CONFIG_SMP=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_HIGHMEM=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel"
-CONFIG_CMDLINE_FORCE=y
-CONFIG_KEXEC=y
-CONFIG_CPU_IDLE=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
-CONFIG_WIRELESS_EXT_SYSFS=y
-CONFIG_MAC80211=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
-# CONFIG_NET_VENDOR_FARADAY is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-CONFIG_SMSC911X=y
-# CONFIG_NET_VENDOR_STMICRO is not set
-CONFIG_B43=y
-CONFIG_B43_PHY_N=y
-CONFIG_B43_DEBUG=y
-CONFIG_INPUT_SPARSEKMAP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_SH_KEYSC=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=9
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C_SH_MOBILE=y
-# CONFIG_HWMON is not set
-CONFIG_BCMA=y
-CONFIG_BCMA_DEBUG=y
-CONFIG_FB=y
-CONFIG_FB_SH_MOBILE_LCDC=y
-CONFIG_LCD_PLATFORM=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_SDHI=y
-CONFIG_MMC_SH_MMCIF=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_RENESAS_TPU=y
-CONFIG_LEDS_TRIGGERS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_REDUCED=y
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 69b879a..402a2bc 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -35,7 +35,7 @@
 	unsigned int		nr_irqs;	/* number of IRQs */
 
 #ifdef CONFIG_ZONE_DMA
-	unsigned long		dma_zone_size;	/* size of DMA-able area */
+	phys_addr_t		dma_zone_size;	/* size of DMA-able area */
 #endif
 
 	unsigned int		video_start;	/* start of video RAM	*/
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 12f71a1..f94784f 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -37,10 +37,10 @@
 	void (*resume)(void);
 };
 
-#ifdef CONFIG_OUTER_CACHE
-
 extern struct outer_cache_fns outer_cache;
 
+#ifdef CONFIG_OUTER_CACHE
+
 static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.inv_range)
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index a832e07..f17aa31 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -33,6 +33,7 @@
 #include <mach/at91sam9g45.h>
 #include <mach/at91sam9x5.h>
 #include <mach/at91sam9n12.h>
+#include <mach/sama5d3.h>
 
 /*
  * On all at91 except rm9200 and x40, the System Controller starts
diff --git a/arch/arm/mach-at91/include/mach/sama5d3.h b/arch/arm/mach-at91/include/mach/sama5d3.h
index 6dc81ee..31096a8 100644
--- a/arch/arm/mach-at91/include/mach/sama5d3.h
+++ b/arch/arm/mach-at91/include/mach/sama5d3.h
@@ -65,6 +65,14 @@
 #define SAMA5D3_ID_IRQ0		47	/* Advanced Interrupt Controller (IRQ0) */
 
 /*
+ * User Peripheral physical base addresses.
+ */
+#define SAMA5D3_BASE_USART0	0xf001c000
+#define SAMA5D3_BASE_USART1	0xf0020000
+#define SAMA5D3_BASE_USART2	0xf8020000
+#define SAMA5D3_BASE_USART3	0xf8024000
+
+/*
  * Internal Memory
  */
 #define SAMA5D3_SRAM_BASE	0x00300000	/* Internal SRAM base address */
diff --git a/arch/arm/mach-at91/include/mach/uncompress.h b/arch/arm/mach-at91/include/mach/uncompress.h
index 5659f7c..4bb644f 100644
--- a/arch/arm/mach-at91/include/mach/uncompress.h
+++ b/arch/arm/mach-at91/include/mach/uncompress.h
@@ -94,6 +94,15 @@
 	0,
 };
 
+static const u32 uarts_sama5[] = {
+	AT91_BASE_DBGU1,
+	SAMA5D3_BASE_USART0,
+	SAMA5D3_BASE_USART1,
+	SAMA5D3_BASE_USART2,
+	SAMA5D3_BASE_USART3,
+	0,
+};
+
 static inline const u32* decomp_soc_detect(void __iomem *dbgu_base)
 {
 	u32 cidr, socid;
@@ -121,8 +130,12 @@
 	case ARCH_ID_AT91SAM9RL64:
 		return uarts_sam9rl;
 
+	case ARCH_ID_AT91SAM9N12:
 	case ARCH_ID_AT91SAM9X5:
 		return uarts_sam9x5;
+
+	case ARCH_ID_SAMA5D3:
+		return uarts_sama5;
 	}
 
 	/* at91sam9g10 */
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 64f2e50..6bc1c18 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -224,62 +224,15 @@
 #define VISION_SPI_MMC_WP	EP93XX_GPIO_LINE_F(0)
 #define VISION_SPI_MMC_CD	EP93XX_GPIO_LINE_EGPIO15
 
-static struct gpio vision_spi_mmc_gpios[] = {
-	{ VISION_SPI_MMC_WP, GPIOF_DIR_IN, "mmc_spi:wp" },
-	{ VISION_SPI_MMC_CD, GPIOF_DIR_IN, "mmc_spi:cd" },
-};
-
-static int vision_spi_mmc_init(struct device *pdev,
-			irqreturn_t (*func)(int, void *), void *pdata)
-{
-	int err;
-
-	err = gpio_request_array(vision_spi_mmc_gpios,
-				 ARRAY_SIZE(vision_spi_mmc_gpios));
-	if (err)
-		return err;
-
-	err = gpio_set_debounce(VISION_SPI_MMC_CD, 1);
-	if (err)
-		goto exit_err;
-
-	err = request_irq(gpio_to_irq(VISION_SPI_MMC_CD), func,
-			IRQ_TYPE_EDGE_BOTH, "mmc_spi:cd", pdata);
-	if (err)
-		goto exit_err;
-
-	return 0;
-
-exit_err:
-	gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
-	return err;
-
-}
-
-static void vision_spi_mmc_exit(struct device *pdev, void *pdata)
-{
-	free_irq(gpio_to_irq(VISION_SPI_MMC_CD), pdata);
-	gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
-}
-
-static int vision_spi_mmc_get_ro(struct device *pdev)
-{
-	return !!gpio_get_value(VISION_SPI_MMC_WP);
-}
-
-static int vision_spi_mmc_get_cd(struct device *pdev)
-{
-	return !gpio_get_value(VISION_SPI_MMC_CD);
-}
-
 static struct mmc_spi_platform_data vision_spi_mmc_data = {
-	.init		= vision_spi_mmc_init,
-	.exit		= vision_spi_mmc_exit,
-	.get_ro		= vision_spi_mmc_get_ro,
-	.get_cd		= vision_spi_mmc_get_cd,
 	.detect_delay	= 100,
 	.powerup_msecs	= 100,
 	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
+	.flags		= MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
+	.cd_gpio	= VISION_SPI_MMC_CD,
+	.cd_debounce	= 1,
+	.ro_gpio	= VISION_SPI_MMC_WP,
+	.caps2		= MMC_CAP2_RO_ACTIVE_HIGH,
 };
 
 static int vision_spi_mmc_hw_setup(struct spi_device *spi)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 5952e68..56fe819 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -36,6 +36,7 @@
 	bool "SAMSUNG EXYNOS4210"
 	default y
 	depends on ARCH_EXYNOS4
+	select ARCH_HAS_BANDGAP
 	select ARM_CPU_SUSPEND if PM
 	select PINCTRL_EXYNOS
 	select PM_GENERIC_DOMAINS if PM
@@ -49,7 +50,9 @@
 	bool "SAMSUNG EXYNOS4212"
 	default y
 	depends on ARCH_EXYNOS4
+	select ARCH_HAS_BANDGAP
 	select PINCTRL_EXYNOS
+	select PM_GENERIC_DOMAINS if PM
 	select S5P_PM if PM
 	select S5P_SLEEP if PM
 	select SAMSUNG_DMADEV
@@ -60,7 +63,9 @@
 	bool "SAMSUNG EXYNOS4412"
 	default y
 	depends on ARCH_EXYNOS4
+	select ARCH_HAS_BANDGAP
 	select PINCTRL_EXYNOS
+	select PM_GENERIC_DOMAINS if PM
 	select SAMSUNG_DMADEV
 	help
 	  Enable EXYNOS4412 SoC support
@@ -69,6 +74,7 @@
 	bool "SAMSUNG EXYNOS5250"
 	default y
 	depends on ARCH_EXYNOS5
+	select ARCH_HAS_BANDGAP
 	select PINCTRL_EXYNOS
 	select PM_GENERIC_DOMAINS if PM
 	select S5P_PM if PM
@@ -93,6 +99,7 @@
 	default y
 	depends on ARCH_EXYNOS5
 	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
+	select ARCH_HAS_BANDGAP
 	select ARCH_HAS_OPP
 	select HAVE_ARM_ARCH_TIMER
 	select AUTO_ZRELADDR
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 225ee84..ac139226 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -200,6 +200,9 @@
 	if (soc_is_exynos5250())
 		exynos5_core_down_clk();
 
+	if (soc_is_exynos5440())
+		exynos4_idle_driver.state_count = 1;
+
 	ret = cpuidle_register_driver(&exynos4_idle_driver);
 	if (ret) {
 		printk(KERN_ERR "CPUidle failed to register driver\n");
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 6acbdab..8e8437d 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,9 +1,14 @@
 config ARCH_HIGHBANK
 	bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
+	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
 	select ARCH_HAS_CPUFREQ
+	select ARCH_HAS_HOLES_MEMORYMODEL
 	select ARCH_HAS_OPP
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARM_AMBA
+	select ARM_ERRATA_764369
+	select ARM_ERRATA_775420
+	select ARM_ERRATA_798181
 	select ARM_GIC
 	select ARM_TIMER_SP804
 	select CACHE_L2X0
@@ -18,3 +23,4 @@
 	select PL320_MBOX
 	select SPARSE_IRQ
 	select USE_OF
+	select ZONE_DMA if ARM_LPAE
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 8881579..8e63ccd 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -18,14 +18,11 @@
 #include <linux/clocksource.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
-#include <linux/irq.h>
 #include <linux/irqchip.h>
-#include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
-#include <linux/smp.h>
 #include <linux/amba/bus.h>
 #include <linux/clk-provider.h>
 
@@ -35,7 +32,6 @@
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include <asm/mach/time.h>
 
 #include "core.h"
 #include "sysregs.h"
@@ -65,13 +61,11 @@
 			  HB_JUMP_TABLE_PHYS(cpu) + 15);
 }
 
-#ifdef CONFIG_CACHE_L2X0
 static void highbank_l2x0_disable(void)
 {
 	/* Disable PL310 L2 Cache controller */
 	highbank_smc1(0x102, 0x0);
 }
-#endif
 
 static void __init highbank_init_irq(void)
 {
@@ -80,12 +74,13 @@
 	if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9"))
 		highbank_scu_map_io();
 
-#ifdef CONFIG_CACHE_L2X0
 	/* Enable PL310 L2 Cache controller */
-	highbank_smc1(0x102, 0x1);
-	l2x0_of_init(0, ~0UL);
-	outer_cache.disable = highbank_l2x0_disable;
-#endif
+	if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+	    of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
+		highbank_smc1(0x102, 0x1);
+		l2x0_of_init(0, ~0UL);
+		outer_cache.disable = highbank_l2x0_disable;
+	}
 }
 
 static void __init highbank_timer_init(void)
@@ -176,6 +171,9 @@
 };
 
 DT_MACHINE_START(HIGHBANK, "Highbank")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+	.dma_zone_size	= (4ULL * SZ_1G),
+#endif
 	.smp		= smp_ops(highbank_smp_ops),
 	.init_irq	= highbank_init_irq,
 	.init_time	= highbank_timer_init,
diff --git a/arch/arm/mach-imx/clk.h b/arch/arm/mach-imx/clk.h
index 3451f1f..048c5ad8 100644
--- a/arch/arm/mach-imx/clk.h
+++ b/arch/arm/mach-imx/clk.h
@@ -89,7 +89,8 @@
 static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
 		u8 shift, u8 width, const char **parents, int num_parents)
 {
-	return clk_register_mux(NULL, name, parents, num_parents, 0, reg, shift,
+	return clk_register_mux(NULL, name, parents, num_parents,
+			CLK_SET_RATE_NO_REPARENT, reg, shift,
 			width, 0, &imx_ccm_lock);
 }
 
@@ -98,7 +99,7 @@
 		int num_parents, unsigned long flags)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
-			flags, reg, shift, width, 0,
+			flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
 			&imx_ccm_lock);
 }
 
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index e065c11..5211f62 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -61,25 +61,8 @@
 	mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR));
 }
 
-static struct sdma_script_start_addrs imx25_sdma_script __initdata = {
-	.ap_2_ap_addr = 729,
-	.uart_2_mcu_addr = 904,
-	.per_2_app_addr = 1255,
-	.mcu_2_app_addr = 834,
-	.uartsh_2_mcu_addr = 1120,
-	.per_2_shp_addr = 1329,
-	.mcu_2_shp_addr = 1048,
-	.ata_2_mcu_addr = 1560,
-	.mcu_2_ata_addr = 1479,
-	.app_2_per_addr = 1189,
-	.app_2_mcu_addr = 770,
-	.shp_2_per_addr = 1407,
-	.shp_2_mcu_addr = 979,
-};
-
 static struct sdma_platform_data imx25_sdma_pdata __initdata = {
 	.fw_name = "sdma-imx25.bin",
-	.script_addrs = &imx25_sdma_script,
 };
 
 static const struct resource imx25_audmux_res[] __initconst = {
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index a8229b7..eb3cce3 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -103,22 +103,8 @@
 	tzic_init_irq(MX53_IO_ADDRESS(MX53_TZIC_BASE_ADDR));
 }
 
-static struct sdma_script_start_addrs imx51_sdma_script __initdata = {
-	.ap_2_ap_addr = 642,
-	.uart_2_mcu_addr = 817,
-	.mcu_2_app_addr = 747,
-	.mcu_2_shp_addr = 961,
-	.ata_2_mcu_addr = 1473,
-	.mcu_2_ata_addr = 1392,
-	.app_2_per_addr = 1033,
-	.app_2_mcu_addr = 683,
-	.shp_2_per_addr = 1251,
-	.shp_2_mcu_addr = 892,
-};
-
 static struct sdma_platform_data imx51_sdma_pdata __initdata = {
 	.fw_name = "sdma-imx51.bin",
-	.script_addrs = &imx51_sdma_script,
 };
 
 static const struct resource imx51_audmux_res[] __initconst = {
diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile
index 095c155..9b702a1 100644
--- a/arch/arm/mach-mmp/Makefile
+++ b/arch/arm/mach-mmp/Makefile
@@ -2,7 +2,7 @@
 # Makefile for Marvell's PXA168 processor line
 #
 
-obj-y				+= common.o devices.o time.o irq.o
+obj-y				+= common.o devices.o time.o
 
 # SoC support
 obj-$(CONFIG_CPU_PXA168)	+= pxa168.o
diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h
index 991d7e9..cf445ba 100644
--- a/arch/arm/mach-mmp/common.h
+++ b/arch/arm/mach-mmp/common.h
@@ -3,7 +3,6 @@
 
 extern void timer_init(int irq);
 
-extern void __init icu_init_irq(void);
 extern void __init mmp_map_io(void);
 extern void mmp_restart(enum reboot_mode, const char *);
 extern void __init pxa168_clk_init(void);
diff --git a/arch/arm/mach-mmp/include/mach/entry-macro.S b/arch/arm/mach-mmp/include/mach/entry-macro.S
deleted file mode 100644
index bd152e2..0000000
--- a/arch/arm/mach-mmp/include/mach/entry-macro.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * linux/arch/arm/mach-mmp/include/mach/entry-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <asm/irq.h>
-#include <mach/regs-icu.h>
-
-	.macro	get_irqnr_preamble, base, tmp
-	mrc	p15, 0, \tmp, c0, c0, 0		@ CPUID
-	and	\tmp, \tmp, #0xff00
-	cmp	\tmp, #0x5800
-	ldr	\base, =mmp_icu_base
-	ldr	\base, [\base, #0]
-	addne	\base, \base, #0x10c		@ PJ1 AP INT SEL register
-	addeq	\base, \base, #0x104		@ PJ4 IRQ SEL register
-	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-	ldr	\tmp, [\base, #0]
-	and	\irqnr, \tmp, #0x3f
-	tst	\tmp, #(1 << 6)
-	.endm
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 459c2d0..a83ba7c 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -4,6 +4,7 @@
 #include <linux/reboot.h>
 
 extern void pxa168_timer_init(void);
+extern void __init icu_init_irq(void);
 extern void __init pxa168_init_irq(void);
 extern void pxa168_restart(enum reboot_mode, const char *);
 extern void pxa168_clear_keypad_wakeup(void);
diff --git a/arch/arm/mach-mmp/include/mach/pxa910.h b/arch/arm/mach-mmp/include/mach/pxa910.h
index b914afa..9225320 100644
--- a/arch/arm/mach-mmp/include/mach/pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/pxa910.h
@@ -2,6 +2,7 @@
 #define __ASM_MACH_PXA910_H
 
 extern void pxa910_timer_init(void);
+extern void __init icu_init_irq(void);
 extern void __init pxa910_init_irq(void);
 
 #include <linux/i2c.h>
diff --git a/arch/arm/mach-mmp/mmp-dt.c b/arch/arm/mach-mmp/mmp-dt.c
index b37915d..cca529c 100644
--- a/arch/arm/mach-mmp/mmp-dt.c
+++ b/arch/arm/mach-mmp/mmp-dt.c
@@ -9,17 +9,13 @@
  *  published by the Free Software Foundation.
  */
 
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
+#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <mach/irqs.h>
 
 #include "common.h"
 
-extern void __init mmp_dt_irq_init(void);
 extern void __init mmp_dt_init_timer(void);
 
 static const struct of_dev_auxdata pxa168_auxdata_lookup[] __initconst = {
@@ -64,7 +60,6 @@
 
 DT_MACHINE_START(PXA168_DT, "Marvell PXA168 (Device Tree Support)")
 	.map_io		= mmp_map_io,
-	.init_irq	= mmp_dt_irq_init,
 	.init_time	= mmp_dt_init_timer,
 	.init_machine	= pxa168_dt_init,
 	.dt_compat	= mmp_dt_board_compat,
@@ -72,7 +67,6 @@
 
 DT_MACHINE_START(PXA910_DT, "Marvell PXA910 (Device Tree Support)")
 	.map_io		= mmp_map_io,
-	.init_irq	= mmp_dt_irq_init,
 	.init_time	= mmp_dt_init_timer,
 	.init_machine	= pxa910_dt_init,
 	.dt_compat	= mmp_dt_board_compat,
diff --git a/arch/arm/mach-mmp/mmp2-dt.c b/arch/arm/mach-mmp/mmp2-dt.c
index 4ac2567..023cb45 100644
--- a/arch/arm/mach-mmp/mmp2-dt.c
+++ b/arch/arm/mach-mmp/mmp2-dt.c
@@ -10,18 +10,13 @@
  */
 
 #include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
+#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <mach/irqs.h>
-#include <mach/regs-apbc.h>
 
 #include "common.h"
 
-extern void __init mmp_dt_irq_init(void);
 extern void __init mmp_dt_init_timer(void);
 
 static const struct of_dev_auxdata mmp2_auxdata_lookup[] __initconst = {
@@ -49,7 +44,6 @@
 
 DT_MACHINE_START(MMP2_DT, "Marvell MMP2 (Device Tree Support)")
 	.map_io		= mmp_map_io,
-	.init_irq	= mmp_dt_irq_init,
 	.init_time	= mmp_dt_init_timer,
 	.init_machine	= mmp2_dt_init,
 	.dt_compat	= mmp2_dt_board_compat,
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index c7592f1..a70b553 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -13,6 +13,8 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/mmp.h>
 #include <linux/platform_device.h>
 
 #include <asm/hardware/cache-tauros2.h>
@@ -26,6 +28,7 @@
 #include <mach/mfp.h>
 #include <mach/devices.h>
 #include <mach/mmp2.h>
+#include <mach/pm-mmp2.h>
 
 #include "common.h"
 
@@ -94,6 +97,9 @@
 void __init mmp2_init_irq(void)
 {
 	mmp2_init_icu();
+#ifdef CONFIG_PM
+	icu_irq_chip.irq_set_wake = mmp2_set_wake;
+#endif
 }
 
 static int __init mmp2_init(void)
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index ce6393a..eb57ee1 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -12,6 +12,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/mmp.h>
 #include <linux/platform_device.h>
 
 #include <asm/hardware/cache-tauros2.h>
@@ -23,6 +25,8 @@
 #include <mach/dma.h>
 #include <mach/mfp.h>
 #include <mach/devices.h>
+#include <mach/pm-pxa910.h>
+#include <mach/pxa910.h>
 
 #include "common.h"
 
@@ -79,6 +83,9 @@
 void __init pxa910_init_irq(void)
 {
 	icu_init_irq();
+#ifdef CONFIG_PM
+	icu_irq_chip.irq_set_wake = pxa910_set_wake;
+#endif
 }
 
 static int __init pxa910_init(void)
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index cc36bfe..afb457c 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -63,6 +63,7 @@
 obj-$(CONFIG_ARCH_OMAP3)		+= omap3-restart.o
 obj-$(CONFIG_ARCH_OMAP4)		+= omap4-restart.o
 obj-$(CONFIG_SOC_OMAP5)			+= omap4-restart.o
+obj-$(CONFIG_SOC_DRA7XX)		+= omap4-restart.o
 
 # Pin multiplexing
 obj-$(CONFIG_SOC_OMAP2420)		+= mux2420.o
@@ -148,6 +149,7 @@
 obj-$(CONFIG_SOC_OMAP5)			+= $(powerdomain-common)
 obj-$(CONFIG_SOC_OMAP5)			+= powerdomains54xx_data.o
 obj-$(CONFIG_SOC_DRA7XX)		+= $(powerdomain-common)
+obj-$(CONFIG_SOC_DRA7XX)		+= powerdomains7xx_data.o
 
 # PRCM clockdomain control
 clockdomain-common			+= clockdomain.o
@@ -166,6 +168,7 @@
 obj-$(CONFIG_SOC_OMAP5)			+= $(clockdomain-common)
 obj-$(CONFIG_SOC_OMAP5)			+= clockdomains54xx_data.o
 obj-$(CONFIG_SOC_DRA7XX)		+= $(clockdomain-common)
+obj-$(CONFIG_SOC_DRA7XX)		+= clockdomains7xx_data.o
 
 # Clock framework
 obj-$(CONFIG_ARCH_OMAP2)		+= $(clock-common) clock2xxx.o
@@ -209,6 +212,7 @@
 obj-$(CONFIG_SOC_AM33XX)		+= omap_hwmod_33xx_data.o
 obj-$(CONFIG_ARCH_OMAP4)		+= omap_hwmod_44xx_data.o
 obj-$(CONFIG_SOC_OMAP5)			+= omap_hwmod_54xx_data.o
+obj-$(CONFIG_SOC_DRA7XX)		+= omap_hwmod_7xx_data.o
 
 # EMU peripherals
 obj-$(CONFIG_OMAP3_EMU)			+= emu.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index b89e55b..39c7838 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -238,5 +238,6 @@
 	.init_machine	= omap_generic_init,
 	.init_time	= omap5_realtime_timer_init,
 	.dt_compat	= dra7xx_boards_compat,
+	.restart	= omap44xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
index ba6534d..865d30e 100644
--- a/arch/arm/mach-omap2/cclock33xx_data.c
+++ b/arch/arm/mach-omap2/cclock33xx_data.c
@@ -421,6 +421,10 @@
 DEFINE_STRUCT_CLK_HW_OMAP(aes0_fck, NULL);
 DEFINE_STRUCT_CLK(aes0_fck, dpll_core_ck_parents, clk_ops_null);
 
+static struct clk rng_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(rng_fck, NULL);
+DEFINE_STRUCT_CLK(rng_fck, dpll_core_ck_parents, clk_ops_null);
+
 /*
  * Modules clock nodes
  *
@@ -966,6 +970,7 @@
 	CLK(NULL,	"smartreflex1_fck",	&smartreflex1_fck),
 	CLK(NULL,	"sha0_fck",		&sha0_fck),
 	CLK(NULL,	"aes0_fck",		&aes0_fck),
+	CLK(NULL,	"rng_fck",		&rng_fck),
 	CLK(NULL,	"timer1_fck",		&timer1_fck),
 	CLK(NULL,	"timer2_fck",		&timer2_fck),
 	CLK(NULL,	"timer3_fck",		&timer3_fck),
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 88e37a4..1d5b529 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -1707,6 +1707,18 @@
 	omap2_clk_disable_autoidle_all();
 
 	/*
+	 * A set rate of the ABE DPLL in turn triggers a set rate of the USB
+	 * DPLL when it is in bypass. So always lock USB before the ABE DPLL.
+	 */
+	/*
+	 * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+	 * domain can transition to retention state when not in use.
+	 */
+	rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
+	if (rc)
+		pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+	/*
 	 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
 	 * state when turning on the ABE clock domain. Work around this by
 	 * locking the ABE DPLL on boot.
@@ -1718,13 +1730,5 @@
 	if (rc)
 		pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
-	/*
-	 * Lock USB DPLL on OMAP4 devices so that the L3INIT power
-	 * domain can transition to retention state when not in use.
-	 */
-	rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
-	if (rc)
-		pr_err("%s: failed to configure USB DPLL!\n", __func__);
-
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index daeecf1..4b03394 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -217,6 +217,7 @@
 extern void __init am33xx_clockdomains_init(void);
 extern void __init omap44xx_clockdomains_init(void);
 extern void __init omap54xx_clockdomains_init(void);
+extern void __init dra7xx_clockdomains_init(void);
 
 extern void clkdm_add_autodeps(struct clockdomain *clkdm);
 extern void clkdm_del_autodeps(struct clockdomain *clkdm);
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
new file mode 100644
index 0000000..57d5df0
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -0,0 +1,740 @@
+/*
+ * DRA7xx Clock domains framework
+ *
+ * Copyright (C) 2009-2013 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Nokia Corporation
+ *
+ * Generated by code originally written by:
+ * Abhijit Pagare (abhijitpagare@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ * Paul Walmsley (paul@pwsan.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "clockdomain.h"
+#include "cm1_7xx.h"
+#include "cm2_7xx.h"
+
+#include "cm-regbits-7xx.h"
+#include "prm7xx.h"
+#include "prcm44xx.h"
+#include "prcm_mpu7xx.h"
+
+/* Static Dependencies for DRA7xx Clock Domains */
+
+static struct clkdm_dep cam_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep dma_wkup_sleep_deps[] = {
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "ipu2_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l4cfg_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "pcie_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep dsp1_wkup_sleep_deps[] = {
+	{ .clkdm_name = "atl_clkdm" },
+	{ .clkdm_name = "cam_clkdm" },
+	{ .clkdm_name = "dsp2_clkdm" },
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "gmac_clkdm" },
+	{ .clkdm_name = "gpu_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "ipu2_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "pcie_clkdm" },
+	{ .clkdm_name = "vpe_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep dsp2_wkup_sleep_deps[] = {
+	{ .clkdm_name = "atl_clkdm" },
+	{ .clkdm_name = "cam_clkdm" },
+	{ .clkdm_name = "dsp1_clkdm" },
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "gmac_clkdm" },
+	{ .clkdm_name = "gpu_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "ipu2_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "pcie_clkdm" },
+	{ .clkdm_name = "vpe_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep dss_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep eve1_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep eve2_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep eve3_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep eve4_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep gmac_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep gpu_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep ipu1_wkup_sleep_deps[] = {
+	{ .clkdm_name = "atl_clkdm" },
+	{ .clkdm_name = "dsp1_clkdm" },
+	{ .clkdm_name = "dsp2_clkdm" },
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "gmac_clkdm" },
+	{ .clkdm_name = "gpu_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu2_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l3main1_clkdm" },
+	{ .clkdm_name = "l4cfg_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "pcie_clkdm" },
+	{ .clkdm_name = "vpe_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep ipu2_wkup_sleep_deps[] = {
+	{ .clkdm_name = "atl_clkdm" },
+	{ .clkdm_name = "dsp1_clkdm" },
+	{ .clkdm_name = "dsp2_clkdm" },
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "gmac_clkdm" },
+	{ .clkdm_name = "gpu_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l3main1_clkdm" },
+	{ .clkdm_name = "l4cfg_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "pcie_clkdm" },
+	{ .clkdm_name = "vpe_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep iva_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep l3init_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l4cfg_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep l4per2_wkup_sleep_deps[] = {
+	{ .clkdm_name = "dsp1_clkdm" },
+	{ .clkdm_name = "dsp2_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "ipu2_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep l4sec_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep mpu_wkup_sleep_deps[] = {
+	{ .clkdm_name = "cam_clkdm" },
+	{ .clkdm_name = "dsp1_clkdm" },
+	{ .clkdm_name = "dsp2_clkdm" },
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "gmac_clkdm" },
+	{ .clkdm_name = "gpu_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "ipu2_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l3main1_clkdm" },
+	{ .clkdm_name = "l4cfg_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "pcie_clkdm" },
+	{ .clkdm_name = "vpe_clkdm" },
+	{ .clkdm_name = "wkupaon_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep pcie_wkup_sleep_deps[] = {
+	{ .clkdm_name = "atl_clkdm" },
+	{ .clkdm_name = "cam_clkdm" },
+	{ .clkdm_name = "dsp1_clkdm" },
+	{ .clkdm_name = "dsp2_clkdm" },
+	{ .clkdm_name = "dss_clkdm" },
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "eve1_clkdm" },
+	{ .clkdm_name = "eve2_clkdm" },
+	{ .clkdm_name = "eve3_clkdm" },
+	{ .clkdm_name = "eve4_clkdm" },
+	{ .clkdm_name = "gmac_clkdm" },
+	{ .clkdm_name = "gpu_clkdm" },
+	{ .clkdm_name = "ipu_clkdm" },
+	{ .clkdm_name = "ipu1_clkdm" },
+	{ .clkdm_name = "iva_clkdm" },
+	{ .clkdm_name = "l3init_clkdm" },
+	{ .clkdm_name = "l4cfg_clkdm" },
+	{ .clkdm_name = "l4per_clkdm" },
+	{ .clkdm_name = "l4per2_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ .clkdm_name = "l4sec_clkdm" },
+	{ .clkdm_name = "vpe_clkdm" },
+	{ NULL },
+};
+
+static struct clkdm_dep vpe_wkup_sleep_deps[] = {
+	{ .clkdm_name = "emif_clkdm" },
+	{ .clkdm_name = "l4per3_clkdm" },
+	{ NULL },
+};
+
+static struct clockdomain l4per3_7xx_clkdm = {
+	.name		  = "l4per3_clkdm",
+	.pwrdm		  = { .name = "l4per_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L4PER_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L4PER_L4PER3_CDOFFS,
+	.dep_bit	  = DRA7XX_L4PER3_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l4per2_7xx_clkdm = {
+	.name		  = "l4per2_clkdm",
+	.pwrdm		  = { .name = "l4per_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L4PER_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L4PER_L4PER2_CDOFFS,
+	.dep_bit	  = DRA7XX_L4PER2_STATDEP_SHIFT,
+	.wkdep_srcs	  = l4per2_wkup_sleep_deps,
+	.sleepdep_srcs	  = l4per2_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain mpu0_7xx_clkdm = {
+	.name		  = "mpu0_clkdm",
+	.pwrdm		  = { .name = "cpu0_pwrdm" },
+	.prcm_partition	  = DRA7XX_MPU_PRCM_PARTITION,
+	.cm_inst	  = DRA7XX_MPU_PRCM_CM_C0_INST,
+	.clkdm_offs	  = DRA7XX_MPU_PRCM_CM_C0_CPU0_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain iva_7xx_clkdm = {
+	.name		  = "iva_clkdm",
+	.pwrdm		  = { .name = "iva_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_IVA_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_IVA_IVA_CDOFFS,
+	.dep_bit	  = DRA7XX_IVA_STATDEP_SHIFT,
+	.wkdep_srcs	  = iva_wkup_sleep_deps,
+	.sleepdep_srcs	  = iva_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain coreaon_7xx_clkdm = {
+	.name		  = "coreaon_clkdm",
+	.pwrdm		  = { .name = "coreaon_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_COREAON_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_COREAON_COREAON_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain ipu1_7xx_clkdm = {
+	.name		  = "ipu1_clkdm",
+	.pwrdm		  = { .name = "ipu_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_IPU_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_IPU_IPU1_CDOFFS,
+	.dep_bit	  = DRA7XX_IPU1_STATDEP_SHIFT,
+	.wkdep_srcs	  = ipu1_wkup_sleep_deps,
+	.sleepdep_srcs	  = ipu1_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain ipu2_7xx_clkdm = {
+	.name		  = "ipu2_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_IPU2_CDOFFS,
+	.dep_bit	  = DRA7XX_IPU2_STATDEP_SHIFT,
+	.wkdep_srcs	  = ipu2_wkup_sleep_deps,
+	.sleepdep_srcs	  = ipu2_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l3init_7xx_clkdm = {
+	.name		  = "l3init_clkdm",
+	.pwrdm		  = { .name = "l3init_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L3INIT_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L3INIT_L3INIT_CDOFFS,
+	.dep_bit	  = DRA7XX_L3INIT_STATDEP_SHIFT,
+	.wkdep_srcs	  = l3init_wkup_sleep_deps,
+	.sleepdep_srcs	  = l3init_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l4sec_7xx_clkdm = {
+	.name		  = "l4sec_clkdm",
+	.pwrdm		  = { .name = "l4per_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L4PER_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L4PER_L4SEC_CDOFFS,
+	.dep_bit	  = DRA7XX_L4SEC_STATDEP_SHIFT,
+	.wkdep_srcs	  = l4sec_wkup_sleep_deps,
+	.sleepdep_srcs	  = l4sec_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l3main1_7xx_clkdm = {
+	.name		  = "l3main1_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_L3MAIN1_CDOFFS,
+	.dep_bit	  = DRA7XX_L3MAIN1_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain vpe_7xx_clkdm = {
+	.name		  = "vpe_clkdm",
+	.pwrdm		  = { .name = "vpe_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_VPE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_VPE_VPE_CDOFFS,
+	.dep_bit	  = DRA7XX_VPE_STATDEP_SHIFT,
+	.wkdep_srcs	  = vpe_wkup_sleep_deps,
+	.sleepdep_srcs	  = vpe_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain mpu_7xx_clkdm = {
+	.name		  = "mpu_clkdm",
+	.pwrdm		  = { .name = "mpu_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_MPU_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_MPU_MPU_CDOFFS,
+	.wkdep_srcs	  = mpu_wkup_sleep_deps,
+	.sleepdep_srcs	  = mpu_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain custefuse_7xx_clkdm = {
+	.name		  = "custefuse_clkdm",
+	.pwrdm		  = { .name = "custefuse_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CUSTEFUSE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CUSTEFUSE_CUSTEFUSE_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain ipu_7xx_clkdm = {
+	.name		  = "ipu_clkdm",
+	.pwrdm		  = { .name = "ipu_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_IPU_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
+	.dep_bit	  = DRA7XX_IPU_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain mpu1_7xx_clkdm = {
+	.name		  = "mpu1_clkdm",
+	.pwrdm		  = { .name = "cpu1_pwrdm" },
+	.prcm_partition	  = DRA7XX_MPU_PRCM_PARTITION,
+	.cm_inst	  = DRA7XX_MPU_PRCM_CM_C1_INST,
+	.clkdm_offs	  = DRA7XX_MPU_PRCM_CM_C1_CPU1_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain gmac_7xx_clkdm = {
+	.name		  = "gmac_clkdm",
+	.pwrdm		  = { .name = "l3init_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L3INIT_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L3INIT_GMAC_CDOFFS,
+	.dep_bit	  = DRA7XX_GMAC_STATDEP_SHIFT,
+	.wkdep_srcs	  = gmac_wkup_sleep_deps,
+	.sleepdep_srcs	  = gmac_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l4cfg_7xx_clkdm = {
+	.name		  = "l4cfg_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_L4CFG_CDOFFS,
+	.dep_bit	  = DRA7XX_L4CFG_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain dma_7xx_clkdm = {
+	.name		  = "dma_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_DMA_CDOFFS,
+	.wkdep_srcs	  = dma_wkup_sleep_deps,
+	.sleepdep_srcs	  = dma_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain rtc_7xx_clkdm = {
+	.name		  = "rtc_clkdm",
+	.pwrdm		  = { .name = "rtc_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_RTC_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_RTC_RTC_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain pcie_7xx_clkdm = {
+	.name		  = "pcie_clkdm",
+	.pwrdm		  = { .name = "l3init_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L3INIT_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L3INIT_PCIE_CDOFFS,
+	.dep_bit	  = DRA7XX_PCIE_STATDEP_SHIFT,
+	.wkdep_srcs	  = pcie_wkup_sleep_deps,
+	.sleepdep_srcs	  = pcie_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain atl_7xx_clkdm = {
+	.name		  = "atl_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_ATL_CDOFFS,
+	.dep_bit	  = DRA7XX_ATL_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain l3instr_7xx_clkdm = {
+	.name		  = "l3instr_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_L3INSTR_CDOFFS,
+};
+
+static struct clockdomain dss_7xx_clkdm = {
+	.name		  = "dss_clkdm",
+	.pwrdm		  = { .name = "dss_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_DSS_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_DSS_DSS_CDOFFS,
+	.dep_bit	  = DRA7XX_DSS_STATDEP_SHIFT,
+	.wkdep_srcs	  = dss_wkup_sleep_deps,
+	.sleepdep_srcs	  = dss_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain emif_7xx_clkdm = {
+	.name		  = "emif_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CORE_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CORE_EMIF_CDOFFS,
+	.dep_bit	  = DRA7XX_EMIF_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain emu_7xx_clkdm = {
+	.name		  = "emu_clkdm",
+	.pwrdm		  = { .name = "emu_pwrdm" },
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.cm_inst	  = DRA7XX_PRM_EMU_CM_INST,
+	.clkdm_offs	  = DRA7XX_PRM_EMU_CM_EMU_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain dsp2_7xx_clkdm = {
+	.name		  = "dsp2_clkdm",
+	.pwrdm		  = { .name = "dsp2_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_DSP2_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_DSP2_DSP2_CDOFFS,
+	.dep_bit	  = DRA7XX_DSP2_STATDEP_SHIFT,
+	.wkdep_srcs	  = dsp2_wkup_sleep_deps,
+	.sleepdep_srcs	  = dsp2_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain dsp1_7xx_clkdm = {
+	.name		  = "dsp1_clkdm",
+	.pwrdm		  = { .name = "dsp1_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_DSP1_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_DSP1_DSP1_CDOFFS,
+	.dep_bit	  = DRA7XX_DSP1_STATDEP_SHIFT,
+	.wkdep_srcs	  = dsp1_wkup_sleep_deps,
+	.sleepdep_srcs	  = dsp1_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain cam_7xx_clkdm = {
+	.name		  = "cam_clkdm",
+	.pwrdm		  = { .name = "cam_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_CAM_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_CAM_CAM_CDOFFS,
+	.dep_bit	  = DRA7XX_CAM_STATDEP_SHIFT,
+	.wkdep_srcs	  = cam_wkup_sleep_deps,
+	.sleepdep_srcs	  = cam_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l4per_7xx_clkdm = {
+	.name		  = "l4per_clkdm",
+	.pwrdm		  = { .name = "l4per_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_L4PER_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_L4PER_L4PER_CDOFFS,
+	.dep_bit	  = DRA7XX_L4PER_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain gpu_7xx_clkdm = {
+	.name		  = "gpu_clkdm",
+	.pwrdm		  = { .name = "gpu_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_GPU_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_GPU_GPU_CDOFFS,
+	.dep_bit	  = DRA7XX_GPU_STATDEP_SHIFT,
+	.wkdep_srcs	  = gpu_wkup_sleep_deps,
+	.sleepdep_srcs	  = gpu_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain eve4_7xx_clkdm = {
+	.name		  = "eve4_clkdm",
+	.pwrdm		  = { .name = "eve4_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_EVE4_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_EVE4_EVE4_CDOFFS,
+	.dep_bit	  = DRA7XX_EVE4_STATDEP_SHIFT,
+	.wkdep_srcs	  = eve4_wkup_sleep_deps,
+	.sleepdep_srcs	  = eve4_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain eve2_7xx_clkdm = {
+	.name		  = "eve2_clkdm",
+	.pwrdm		  = { .name = "eve2_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_EVE2_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_EVE2_EVE2_CDOFFS,
+	.dep_bit	  = DRA7XX_EVE2_STATDEP_SHIFT,
+	.wkdep_srcs	  = eve2_wkup_sleep_deps,
+	.sleepdep_srcs	  = eve2_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain eve3_7xx_clkdm = {
+	.name		  = "eve3_clkdm",
+	.pwrdm		  = { .name = "eve3_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_EVE3_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_EVE3_EVE3_CDOFFS,
+	.dep_bit	  = DRA7XX_EVE3_STATDEP_SHIFT,
+	.wkdep_srcs	  = eve3_wkup_sleep_deps,
+	.sleepdep_srcs	  = eve3_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain wkupaon_7xx_clkdm = {
+	.name		  = "wkupaon_clkdm",
+	.pwrdm		  = { .name = "wkupaon_pwrdm" },
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.cm_inst	  = DRA7XX_PRM_WKUPAON_CM_INST,
+	.clkdm_offs	  = DRA7XX_PRM_WKUPAON_CM_WKUPAON_CDOFFS,
+	.dep_bit	  = DRA7XX_WKUPAON_STATDEP_SHIFT,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+};
+
+static struct clockdomain eve1_7xx_clkdm = {
+	.name		  = "eve1_clkdm",
+	.pwrdm		  = { .name = "eve1_pwrdm" },
+	.prcm_partition	  = DRA7XX_CM_CORE_AON_PARTITION,
+	.cm_inst	  = DRA7XX_CM_CORE_AON_EVE1_INST,
+	.clkdm_offs	  = DRA7XX_CM_CORE_AON_EVE1_EVE1_CDOFFS,
+	.dep_bit	  = DRA7XX_EVE1_STATDEP_SHIFT,
+	.wkdep_srcs	  = eve1_wkup_sleep_deps,
+	.sleepdep_srcs	  = eve1_wkup_sleep_deps,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+/* As clockdomains are added or removed above, this list must also be changed */
+static struct clockdomain *clockdomains_dra7xx[] __initdata = {
+	&l4per3_7xx_clkdm,
+	&l4per2_7xx_clkdm,
+	&mpu0_7xx_clkdm,
+	&iva_7xx_clkdm,
+	&coreaon_7xx_clkdm,
+	&ipu1_7xx_clkdm,
+	&ipu2_7xx_clkdm,
+	&l3init_7xx_clkdm,
+	&l4sec_7xx_clkdm,
+	&l3main1_7xx_clkdm,
+	&vpe_7xx_clkdm,
+	&mpu_7xx_clkdm,
+	&custefuse_7xx_clkdm,
+	&ipu_7xx_clkdm,
+	&mpu1_7xx_clkdm,
+	&gmac_7xx_clkdm,
+	&l4cfg_7xx_clkdm,
+	&dma_7xx_clkdm,
+	&rtc_7xx_clkdm,
+	&pcie_7xx_clkdm,
+	&atl_7xx_clkdm,
+	&l3instr_7xx_clkdm,
+	&dss_7xx_clkdm,
+	&emif_7xx_clkdm,
+	&emu_7xx_clkdm,
+	&dsp2_7xx_clkdm,
+	&dsp1_7xx_clkdm,
+	&cam_7xx_clkdm,
+	&l4per_7xx_clkdm,
+	&gpu_7xx_clkdm,
+	&eve4_7xx_clkdm,
+	&eve2_7xx_clkdm,
+	&eve3_7xx_clkdm,
+	&wkupaon_7xx_clkdm,
+	&eve1_7xx_clkdm,
+	NULL
+};
+
+void __init dra7xx_clockdomains_init(void)
+{
+	clkdm_register_platform_funcs(&omap4_clkdm_operations);
+	clkdm_register_clkdms(clockdomains_dra7xx);
+	clkdm_complete_init();
+}
diff --git a/arch/arm/mach-omap2/cm-regbits-7xx.h b/arch/arm/mach-omap2/cm-regbits-7xx.h
new file mode 100644
index 0000000..ad8f81c
--- /dev/null
+++ b/arch/arm/mach-omap2/cm-regbits-7xx.h
@@ -0,0 +1,51 @@
+/*
+ * DRA7xx Clock Management register bits
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Generated by code originally written by:
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_7XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_7XX_H
+
+#define DRA7XX_ATL_STATDEP_SHIFT				30
+#define DRA7XX_CAM_STATDEP_SHIFT				9
+#define DRA7XX_DSP1_STATDEP_SHIFT				1
+#define DRA7XX_DSP2_STATDEP_SHIFT				18
+#define DRA7XX_DSS_STATDEP_SHIFT				8
+#define DRA7XX_EMIF_STATDEP_SHIFT				4
+#define DRA7XX_EVE1_STATDEP_SHIFT				19
+#define DRA7XX_EVE2_STATDEP_SHIFT				20
+#define DRA7XX_EVE3_STATDEP_SHIFT				21
+#define DRA7XX_EVE4_STATDEP_SHIFT				22
+#define DRA7XX_GMAC_STATDEP_SHIFT				25
+#define DRA7XX_GPU_STATDEP_SHIFT				10
+#define DRA7XX_IPU1_STATDEP_SHIFT				23
+#define DRA7XX_IPU2_STATDEP_SHIFT				0
+#define DRA7XX_IPU_STATDEP_SHIFT				24
+#define DRA7XX_IVA_STATDEP_SHIFT				2
+#define DRA7XX_L3INIT_STATDEP_SHIFT				7
+#define DRA7XX_L3MAIN1_STATDEP_SHIFT				5
+#define DRA7XX_L4CFG_STATDEP_SHIFT				12
+#define DRA7XX_L4PER2_STATDEP_SHIFT				26
+#define DRA7XX_L4PER3_STATDEP_SHIFT				27
+#define DRA7XX_L4PER_STATDEP_SHIFT				13
+#define DRA7XX_L4SEC_STATDEP_SHIFT				14
+#define DRA7XX_PCIE_STATDEP_SHIFT				29
+#define DRA7XX_VPE_STATDEP_SHIFT				28
+#define DRA7XX_WKUPAON_STATDEP_SHIFT				15
+#endif
diff --git a/arch/arm/mach-omap2/cm1_7xx.h b/arch/arm/mach-omap2/cm1_7xx.h
new file mode 100644
index 0000000..ca6fa1f
--- /dev/null
+++ b/arch/arm/mach-omap2/cm1_7xx.h
@@ -0,0 +1,324 @@
+/*
+ * DRA7xx CM1 instance offset macros
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Generated by code originally written by:
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM1_7XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM1_7XX_H
+
+#include "cm_44xx_54xx.h"
+
+/* CM1 base address */
+#define DRA7XX_CM_CORE_AON_BASE		0x4a005000
+
+#define DRA7XX_CM_CORE_AON_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(DRA7XX_CM_CORE_AON_BASE + (inst) + (reg))
+
+/* CM_CORE_AON instances */
+#define DRA7XX_CM_CORE_AON_OCP_SOCKET_INST	0x0000
+#define DRA7XX_CM_CORE_AON_CKGEN_INST		0x0100
+#define DRA7XX_CM_CORE_AON_MPU_INST		0x0300
+#define DRA7XX_CM_CORE_AON_DSP1_INST		0x0400
+#define DRA7XX_CM_CORE_AON_IPU_INST		0x0500
+#define DRA7XX_CM_CORE_AON_DSP2_INST		0x0600
+#define DRA7XX_CM_CORE_AON_EVE1_INST		0x0640
+#define DRA7XX_CM_CORE_AON_EVE2_INST		0x0680
+#define DRA7XX_CM_CORE_AON_EVE3_INST		0x06c0
+#define DRA7XX_CM_CORE_AON_EVE4_INST		0x0700
+#define DRA7XX_CM_CORE_AON_RTC_INST		0x0740
+#define DRA7XX_CM_CORE_AON_VPE_INST		0x0760
+#define DRA7XX_CM_CORE_AON_RESTORE_INST		0x0e00
+#define DRA7XX_CM_CORE_AON_INSTR_INST		0x0f00
+
+/* CM_CORE_AON clockdomain register offsets (from instance start) */
+#define DRA7XX_CM_CORE_AON_MPU_MPU_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_DSP1_DSP1_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_IPU_IPU1_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS	0x0040
+#define DRA7XX_CM_CORE_AON_DSP2_DSP2_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_EVE1_EVE1_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_EVE2_EVE2_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_EVE3_EVE3_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_EVE4_EVE4_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_RTC_RTC_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_AON_VPE_VPE_CDOFFS	0x0000
+
+/* CM_CORE_AON */
+
+/* CM_CORE_AON.OCP_SOCKET_CM_CORE_AON register offsets */
+#define DRA7XX_REVISION_CM_CORE_AON_OFFSET		0x0000
+#define DRA7XX_CM_CM_CORE_AON_PROFILING_CLKCTRL_OFFSET	0x0040
+#define DRA7XX_CM_CM_CORE_AON_PROFILING_CLKCTRL		DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_OCP_SOCKET_INST, 0x0040)
+#define DRA7XX_CM_CORE_AON_DEBUG_OUT_OFFSET		0x00ec
+#define DRA7XX_CM_CORE_AON_DEBUG_CFG0_OFFSET		0x00f0
+#define DRA7XX_CM_CORE_AON_DEBUG_CFG1_OFFSET		0x00f4
+#define DRA7XX_CM_CORE_AON_DEBUG_CFG2_OFFSET		0x00f8
+#define DRA7XX_CM_CORE_AON_DEBUG_CFG3_OFFSET		0x00fc
+
+/* CM_CORE_AON.CKGEN_CM_CORE_AON register offsets */
+#define DRA7XX_CM_CLKSEL_CORE_OFFSET			0x0000
+#define DRA7XX_CM_CLKSEL_CORE				DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0000)
+#define DRA7XX_CM_CLKSEL_ABE_OFFSET			0x0008
+#define DRA7XX_CM_CLKSEL_ABE				DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0008)
+#define DRA7XX_CM_DLL_CTRL_OFFSET			0x0010
+#define DRA7XX_CM_CLKMODE_DPLL_CORE_OFFSET		0x0020
+#define DRA7XX_CM_CLKMODE_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0020)
+#define DRA7XX_CM_IDLEST_DPLL_CORE_OFFSET		0x0024
+#define DRA7XX_CM_IDLEST_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0024)
+#define DRA7XX_CM_AUTOIDLE_DPLL_CORE_OFFSET		0x0028
+#define DRA7XX_CM_AUTOIDLE_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0028)
+#define DRA7XX_CM_CLKSEL_DPLL_CORE_OFFSET		0x002c
+#define DRA7XX_CM_CLKSEL_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x002c)
+#define DRA7XX_CM_DIV_M2_DPLL_CORE_OFFSET		0x0030
+#define DRA7XX_CM_DIV_M2_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0030)
+#define DRA7XX_CM_DIV_M3_DPLL_CORE_OFFSET		0x0034
+#define DRA7XX_CM_DIV_M3_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0034)
+#define DRA7XX_CM_DIV_H11_DPLL_CORE_OFFSET		0x0038
+#define DRA7XX_CM_DIV_H11_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0038)
+#define DRA7XX_CM_DIV_H12_DPLL_CORE_OFFSET		0x003c
+#define DRA7XX_CM_DIV_H12_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x003c)
+#define DRA7XX_CM_DIV_H13_DPLL_CORE_OFFSET		0x0040
+#define DRA7XX_CM_DIV_H13_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0040)
+#define DRA7XX_CM_DIV_H14_DPLL_CORE_OFFSET		0x0044
+#define DRA7XX_CM_DIV_H14_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0044)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET	0x0048
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET	0x004c
+#define DRA7XX_CM_DIV_H21_DPLL_CORE_OFFSET		0x0050
+#define DRA7XX_CM_DIV_H21_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0050)
+#define DRA7XX_CM_DIV_H22_DPLL_CORE_OFFSET		0x0054
+#define DRA7XX_CM_DIV_H22_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0054)
+#define DRA7XX_CM_DIV_H23_DPLL_CORE_OFFSET		0x0058
+#define DRA7XX_CM_DIV_H23_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0058)
+#define DRA7XX_CM_DIV_H24_DPLL_CORE_OFFSET		0x005c
+#define DRA7XX_CM_DIV_H24_DPLL_CORE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x005c)
+#define DRA7XX_CM_CLKMODE_DPLL_MPU_OFFSET		0x0060
+#define DRA7XX_CM_CLKMODE_DPLL_MPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0060)
+#define DRA7XX_CM_IDLEST_DPLL_MPU_OFFSET		0x0064
+#define DRA7XX_CM_IDLEST_DPLL_MPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0064)
+#define DRA7XX_CM_AUTOIDLE_DPLL_MPU_OFFSET		0x0068
+#define DRA7XX_CM_AUTOIDLE_DPLL_MPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0068)
+#define DRA7XX_CM_CLKSEL_DPLL_MPU_OFFSET		0x006c
+#define DRA7XX_CM_CLKSEL_DPLL_MPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x006c)
+#define DRA7XX_CM_DIV_M2_DPLL_MPU_OFFSET		0x0070
+#define DRA7XX_CM_DIV_M2_DPLL_MPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0070)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET	0x0088
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET	0x008c
+#define DRA7XX_CM_BYPCLK_DPLL_MPU_OFFSET		0x009c
+#define DRA7XX_CM_BYPCLK_DPLL_MPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x009c)
+#define DRA7XX_CM_CLKMODE_DPLL_IVA_OFFSET		0x00a0
+#define DRA7XX_CM_CLKMODE_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00a0)
+#define DRA7XX_CM_IDLEST_DPLL_IVA_OFFSET		0x00a4
+#define DRA7XX_CM_IDLEST_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00a4)
+#define DRA7XX_CM_AUTOIDLE_DPLL_IVA_OFFSET		0x00a8
+#define DRA7XX_CM_AUTOIDLE_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00a8)
+#define DRA7XX_CM_CLKSEL_DPLL_IVA_OFFSET		0x00ac
+#define DRA7XX_CM_CLKSEL_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00ac)
+#define DRA7XX_CM_DIV_M2_DPLL_IVA_OFFSET		0x00b0
+#define DRA7XX_CM_DIV_M2_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00b0)
+#define DRA7XX_CM_DIV_M3_DPLL_IVA_OFFSET		0x00b4
+#define DRA7XX_CM_DIV_M3_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00b4)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET	0x00c8
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET	0x00cc
+#define DRA7XX_CM_BYPCLK_DPLL_IVA_OFFSET		0x00dc
+#define DRA7XX_CM_BYPCLK_DPLL_IVA			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00dc)
+#define DRA7XX_CM_CLKMODE_DPLL_ABE_OFFSET		0x00e0
+#define DRA7XX_CM_CLKMODE_DPLL_ABE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00e0)
+#define DRA7XX_CM_IDLEST_DPLL_ABE_OFFSET		0x00e4
+#define DRA7XX_CM_IDLEST_DPLL_ABE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00e4)
+#define DRA7XX_CM_AUTOIDLE_DPLL_ABE_OFFSET		0x00e8
+#define DRA7XX_CM_AUTOIDLE_DPLL_ABE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00e8)
+#define DRA7XX_CM_CLKSEL_DPLL_ABE_OFFSET		0x00ec
+#define DRA7XX_CM_CLKSEL_DPLL_ABE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00ec)
+#define DRA7XX_CM_DIV_M2_DPLL_ABE_OFFSET		0x00f0
+#define DRA7XX_CM_DIV_M2_DPLL_ABE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00f0)
+#define DRA7XX_CM_DIV_M3_DPLL_ABE_OFFSET		0x00f4
+#define DRA7XX_CM_DIV_M3_DPLL_ABE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x00f4)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET	0x0108
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET	0x010c
+#define DRA7XX_CM_CLKMODE_DPLL_DDR_OFFSET		0x0110
+#define DRA7XX_CM_CLKMODE_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0110)
+#define DRA7XX_CM_IDLEST_DPLL_DDR_OFFSET		0x0114
+#define DRA7XX_CM_IDLEST_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0114)
+#define DRA7XX_CM_AUTOIDLE_DPLL_DDR_OFFSET		0x0118
+#define DRA7XX_CM_AUTOIDLE_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0118)
+#define DRA7XX_CM_CLKSEL_DPLL_DDR_OFFSET		0x011c
+#define DRA7XX_CM_CLKSEL_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x011c)
+#define DRA7XX_CM_DIV_M2_DPLL_DDR_OFFSET		0x0120
+#define DRA7XX_CM_DIV_M2_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0120)
+#define DRA7XX_CM_DIV_M3_DPLL_DDR_OFFSET		0x0124
+#define DRA7XX_CM_DIV_M3_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0124)
+#define DRA7XX_CM_DIV_H11_DPLL_DDR_OFFSET		0x0128
+#define DRA7XX_CM_DIV_H11_DPLL_DDR			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0128)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_DDR_OFFSET	0x012c
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_DDR_OFFSET	0x0130
+#define DRA7XX_CM_CLKMODE_DPLL_DSP_OFFSET		0x0134
+#define DRA7XX_CM_CLKMODE_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0134)
+#define DRA7XX_CM_IDLEST_DPLL_DSP_OFFSET		0x0138
+#define DRA7XX_CM_IDLEST_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0138)
+#define DRA7XX_CM_AUTOIDLE_DPLL_DSP_OFFSET		0x013c
+#define DRA7XX_CM_AUTOIDLE_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x013c)
+#define DRA7XX_CM_CLKSEL_DPLL_DSP_OFFSET		0x0140
+#define DRA7XX_CM_CLKSEL_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0140)
+#define DRA7XX_CM_DIV_M2_DPLL_DSP_OFFSET		0x0144
+#define DRA7XX_CM_DIV_M2_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0144)
+#define DRA7XX_CM_DIV_M3_DPLL_DSP_OFFSET		0x0148
+#define DRA7XX_CM_DIV_M3_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0148)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_DSP_OFFSET	0x014c
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_DSP_OFFSET	0x0150
+#define DRA7XX_CM_BYPCLK_DPLL_DSP_OFFSET		0x0154
+#define DRA7XX_CM_BYPCLK_DPLL_DSP			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0154)
+#define DRA7XX_CM_SHADOW_FREQ_CONFIG1_OFFSET		0x0160
+#define DRA7XX_CM_SHADOW_FREQ_CONFIG2_OFFSET		0x0164
+#define DRA7XX_CM_DYN_DEP_PRESCAL_OFFSET		0x0170
+#define DRA7XX_CM_RESTORE_ST_OFFSET			0x0180
+#define DRA7XX_CM_CLKMODE_DPLL_EVE_OFFSET		0x0184
+#define DRA7XX_CM_CLKMODE_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0184)
+#define DRA7XX_CM_IDLEST_DPLL_EVE_OFFSET		0x0188
+#define DRA7XX_CM_IDLEST_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0188)
+#define DRA7XX_CM_AUTOIDLE_DPLL_EVE_OFFSET		0x018c
+#define DRA7XX_CM_AUTOIDLE_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x018c)
+#define DRA7XX_CM_CLKSEL_DPLL_EVE_OFFSET		0x0190
+#define DRA7XX_CM_CLKSEL_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0190)
+#define DRA7XX_CM_DIV_M2_DPLL_EVE_OFFSET		0x0194
+#define DRA7XX_CM_DIV_M2_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0194)
+#define DRA7XX_CM_DIV_M3_DPLL_EVE_OFFSET		0x0198
+#define DRA7XX_CM_DIV_M3_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x0198)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_EVE_OFFSET	0x019c
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_EVE_OFFSET	0x01a0
+#define DRA7XX_CM_BYPCLK_DPLL_EVE_OFFSET		0x01a4
+#define DRA7XX_CM_BYPCLK_DPLL_EVE			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01a4)
+#define DRA7XX_CM_CLKMODE_DPLL_GMAC_OFFSET		0x01a8
+#define DRA7XX_CM_CLKMODE_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01a8)
+#define DRA7XX_CM_IDLEST_DPLL_GMAC_OFFSET		0x01ac
+#define DRA7XX_CM_IDLEST_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01ac)
+#define DRA7XX_CM_AUTOIDLE_DPLL_GMAC_OFFSET		0x01b0
+#define DRA7XX_CM_AUTOIDLE_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01b0)
+#define DRA7XX_CM_CLKSEL_DPLL_GMAC_OFFSET		0x01b4
+#define DRA7XX_CM_CLKSEL_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01b4)
+#define DRA7XX_CM_DIV_M2_DPLL_GMAC_OFFSET		0x01b8
+#define DRA7XX_CM_DIV_M2_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01b8)
+#define DRA7XX_CM_DIV_M3_DPLL_GMAC_OFFSET		0x01bc
+#define DRA7XX_CM_DIV_M3_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01bc)
+#define DRA7XX_CM_DIV_H11_DPLL_GMAC_OFFSET		0x01c0
+#define DRA7XX_CM_DIV_H11_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01c0)
+#define DRA7XX_CM_DIV_H12_DPLL_GMAC_OFFSET		0x01c4
+#define DRA7XX_CM_DIV_H12_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01c4)
+#define DRA7XX_CM_DIV_H13_DPLL_GMAC_OFFSET		0x01c8
+#define DRA7XX_CM_DIV_H13_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01c8)
+#define DRA7XX_CM_DIV_H14_DPLL_GMAC_OFFSET		0x01cc
+#define DRA7XX_CM_DIV_H14_DPLL_GMAC			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01cc)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_GMAC_OFFSET	0x01d0
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_GMAC_OFFSET	0x01d4
+#define DRA7XX_CM_CLKMODE_DPLL_GPU_OFFSET		0x01d8
+#define DRA7XX_CM_CLKMODE_DPLL_GPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01d8)
+#define DRA7XX_CM_IDLEST_DPLL_GPU_OFFSET		0x01dc
+#define DRA7XX_CM_IDLEST_DPLL_GPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01dc)
+#define DRA7XX_CM_AUTOIDLE_DPLL_GPU_OFFSET		0x01e0
+#define DRA7XX_CM_AUTOIDLE_DPLL_GPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01e0)
+#define DRA7XX_CM_CLKSEL_DPLL_GPU_OFFSET		0x01e4
+#define DRA7XX_CM_CLKSEL_DPLL_GPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01e4)
+#define DRA7XX_CM_DIV_M2_DPLL_GPU_OFFSET		0x01e8
+#define DRA7XX_CM_DIV_M2_DPLL_GPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01e8)
+#define DRA7XX_CM_DIV_M3_DPLL_GPU_OFFSET		0x01ec
+#define DRA7XX_CM_DIV_M3_DPLL_GPU			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_CKGEN_INST, 0x01ec)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_GPU_OFFSET	0x01f0
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_GPU_OFFSET	0x01f4
+
+/* CM_CORE_AON.MPU_CM_CORE_AON register offsets */
+#define DRA7XX_CM_MPU_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_MPU_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_MPU_DYNAMICDEP_OFFSET			0x0008
+#define DRA7XX_CM_MPU_MPU_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_MPU_MPU_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_MPU_INST, 0x0020)
+#define DRA7XX_CM_MPU_MPU_MPU_DBG_CLKCTRL_OFFSET	0x0028
+#define DRA7XX_CM_MPU_MPU_MPU_DBG_CLKCTRL		DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_MPU_INST, 0x0028)
+
+/* CM_CORE_AON.DSP1_CM_CORE_AON register offsets */
+#define DRA7XX_CM_DSP1_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_DSP1_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_DSP1_DYNAMICDEP_OFFSET		0x0008
+#define DRA7XX_CM_DSP1_DSP1_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_DSP1_DSP1_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_DSP1_INST, 0x0020)
+
+/* CM_CORE_AON.IPU_CM_CORE_AON register offsets */
+#define DRA7XX_CM_IPU1_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_IPU1_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_IPU1_DYNAMICDEP_OFFSET		0x0008
+#define DRA7XX_CM_IPU1_IPU1_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_IPU1_IPU1_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0020)
+#define DRA7XX_CM_IPU_CLKSTCTRL_OFFSET			0x0040
+#define DRA7XX_CM_IPU_MCASP1_CLKCTRL_OFFSET		0x0050
+#define DRA7XX_CM_IPU_MCASP1_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0050)
+#define DRA7XX_CM_IPU_TIMER5_CLKCTRL_OFFSET		0x0058
+#define DRA7XX_CM_IPU_TIMER5_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0058)
+#define DRA7XX_CM_IPU_TIMER6_CLKCTRL_OFFSET		0x0060
+#define DRA7XX_CM_IPU_TIMER6_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0060)
+#define DRA7XX_CM_IPU_TIMER7_CLKCTRL_OFFSET		0x0068
+#define DRA7XX_CM_IPU_TIMER7_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0068)
+#define DRA7XX_CM_IPU_TIMER8_CLKCTRL_OFFSET		0x0070
+#define DRA7XX_CM_IPU_TIMER8_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0070)
+#define DRA7XX_CM_IPU_I2C5_CLKCTRL_OFFSET		0x0078
+#define DRA7XX_CM_IPU_I2C5_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0078)
+#define DRA7XX_CM_IPU_UART6_CLKCTRL_OFFSET		0x0080
+#define DRA7XX_CM_IPU_UART6_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_IPU_INST, 0x0080)
+
+/* CM_CORE_AON.DSP2_CM_CORE_AON register offsets */
+#define DRA7XX_CM_DSP2_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_DSP2_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_DSP2_DYNAMICDEP_OFFSET		0x0008
+#define DRA7XX_CM_DSP2_DSP2_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_DSP2_DSP2_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_DSP2_INST, 0x0020)
+
+/* CM_CORE_AON.EVE1_CM_CORE_AON register offsets */
+#define DRA7XX_CM_EVE1_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_EVE1_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_EVE1_EVE1_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_EVE1_EVE1_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_EVE1_INST, 0x0020)
+
+/* CM_CORE_AON.EVE2_CM_CORE_AON register offsets */
+#define DRA7XX_CM_EVE2_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_EVE2_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_EVE2_EVE2_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_EVE2_EVE2_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_EVE2_INST, 0x0020)
+
+/* CM_CORE_AON.EVE3_CM_CORE_AON register offsets */
+#define DRA7XX_CM_EVE3_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_EVE3_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_EVE3_EVE3_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_EVE3_EVE3_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_EVE3_INST, 0x0020)
+
+/* CM_CORE_AON.EVE4_CM_CORE_AON register offsets */
+#define DRA7XX_CM_EVE4_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_EVE4_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_EVE4_EVE4_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_EVE4_EVE4_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_EVE4_INST, 0x0020)
+
+/* CM_CORE_AON.RTC_CM_CORE_AON register offsets */
+#define DRA7XX_CM_RTC_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_RTC_RTCSS_CLKCTRL_OFFSET		0x0004
+#define DRA7XX_CM_RTC_RTCSS_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_RTC_INST, 0x0004)
+
+/* CM_CORE_AON.VPE_CM_CORE_AON register offsets */
+#define DRA7XX_CM_VPE_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_VPE_VPE_CLKCTRL_OFFSET		0x0004
+#define DRA7XX_CM_VPE_VPE_CLKCTRL			DRA7XX_CM_CORE_AON_REGADDR(DRA7XX_CM_CORE_AON_VPE_INST, 0x0004)
+#define DRA7XX_CM_VPE_STATICDEP_OFFSET			0x0008
+
+#endif
diff --git a/arch/arm/mach-omap2/cm2_7xx.h b/arch/arm/mach-omap2/cm2_7xx.h
new file mode 100644
index 0000000..9ad7594
--- /dev/null
+++ b/arch/arm/mach-omap2/cm2_7xx.h
@@ -0,0 +1,513 @@
+/*
+ * DRA7xx CM2 instance offset macros
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Generated by code originally written by:
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM2_7XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM2_7XX_H
+
+#include "cm_44xx_54xx.h"
+
+/* CM2 base address */
+#define DRA7XX_CM_CORE_BASE		0x4a008000
+
+#define DRA7XX_CM_CORE_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(DRA7XX_CM_CORE_BASE + (inst) + (reg))
+
+/* CM_CORE instances */
+#define DRA7XX_CM_CORE_OCP_SOCKET_INST	0x0000
+#define DRA7XX_CM_CORE_CKGEN_INST	0x0104
+#define DRA7XX_CM_CORE_COREAON_INST	0x0600
+#define DRA7XX_CM_CORE_CORE_INST	0x0700
+#define DRA7XX_CM_CORE_IVA_INST		0x0f00
+#define DRA7XX_CM_CORE_CAM_INST		0x1000
+#define DRA7XX_CM_CORE_DSS_INST		0x1100
+#define DRA7XX_CM_CORE_GPU_INST		0x1200
+#define DRA7XX_CM_CORE_L3INIT_INST	0x1300
+#define DRA7XX_CM_CORE_CUSTEFUSE_INST	0x1600
+#define DRA7XX_CM_CORE_L4PER_INST	0x1700
+#define DRA7XX_CM_CORE_RESTORE_INST	0x1e18
+
+/* CM_CORE clockdomain register offsets (from instance start) */
+#define DRA7XX_CM_CORE_COREAON_COREAON_CDOFFS		0x0000
+#define DRA7XX_CM_CORE_CORE_L3MAIN1_CDOFFS		0x0000
+#define DRA7XX_CM_CORE_CORE_IPU2_CDOFFS			0x0200
+#define DRA7XX_CM_CORE_CORE_DMA_CDOFFS			0x0300
+#define DRA7XX_CM_CORE_CORE_EMIF_CDOFFS			0x0400
+#define DRA7XX_CM_CORE_CORE_ATL_CDOFFS			0x0520
+#define DRA7XX_CM_CORE_CORE_L4CFG_CDOFFS		0x0600
+#define DRA7XX_CM_CORE_CORE_L3INSTR_CDOFFS		0x0700
+#define DRA7XX_CM_CORE_IVA_IVA_CDOFFS			0x0000
+#define DRA7XX_CM_CORE_CAM_CAM_CDOFFS			0x0000
+#define DRA7XX_CM_CORE_DSS_DSS_CDOFFS			0x0000
+#define DRA7XX_CM_CORE_GPU_GPU_CDOFFS			0x0000
+#define DRA7XX_CM_CORE_L3INIT_L3INIT_CDOFFS		0x0000
+#define DRA7XX_CM_CORE_L3INIT_PCIE_CDOFFS		0x00a0
+#define DRA7XX_CM_CORE_L3INIT_GMAC_CDOFFS		0x00c0
+#define DRA7XX_CM_CORE_CUSTEFUSE_CUSTEFUSE_CDOFFS	0x0000
+#define DRA7XX_CM_CORE_L4PER_L4PER_CDOFFS		0x0000
+#define DRA7XX_CM_CORE_L4PER_L4SEC_CDOFFS		0x0180
+#define DRA7XX_CM_CORE_L4PER_L4PER2_CDOFFS		0x01fc
+#define DRA7XX_CM_CORE_L4PER_L4PER3_CDOFFS		0x0210
+
+/* CM_CORE */
+
+/* CM_CORE.OCP_SOCKET_CM_CORE register offsets */
+#define DRA7XX_REVISION_CM_CORE_OFFSET				0x0000
+#define DRA7XX_CM_CM_CORE_PROFILING_CLKCTRL_OFFSET		0x0040
+#define DRA7XX_CM_CM_CORE_PROFILING_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_OCP_SOCKET_INST, 0x0040)
+#define DRA7XX_CM_CORE_DEBUG_CFG_OFFSET				0x00f0
+
+/* CM_CORE.CKGEN_CM_CORE register offsets */
+#define DRA7XX_CM_CLKSEL_USB_60MHZ_OFFSET			0x0000
+#define DRA7XX_CM_CLKSEL_USB_60MHZ				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0000)
+#define DRA7XX_CM_CLKMODE_DPLL_PER_OFFSET			0x003c
+#define DRA7XX_CM_CLKMODE_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x003c)
+#define DRA7XX_CM_IDLEST_DPLL_PER_OFFSET			0x0040
+#define DRA7XX_CM_IDLEST_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0040)
+#define DRA7XX_CM_AUTOIDLE_DPLL_PER_OFFSET			0x0044
+#define DRA7XX_CM_AUTOIDLE_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0044)
+#define DRA7XX_CM_CLKSEL_DPLL_PER_OFFSET			0x0048
+#define DRA7XX_CM_CLKSEL_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0048)
+#define DRA7XX_CM_DIV_M2_DPLL_PER_OFFSET			0x004c
+#define DRA7XX_CM_DIV_M2_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x004c)
+#define DRA7XX_CM_DIV_M3_DPLL_PER_OFFSET			0x0050
+#define DRA7XX_CM_DIV_M3_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0050)
+#define DRA7XX_CM_DIV_H11_DPLL_PER_OFFSET			0x0054
+#define DRA7XX_CM_DIV_H11_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0054)
+#define DRA7XX_CM_DIV_H12_DPLL_PER_OFFSET			0x0058
+#define DRA7XX_CM_DIV_H12_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0058)
+#define DRA7XX_CM_DIV_H13_DPLL_PER_OFFSET			0x005c
+#define DRA7XX_CM_DIV_H13_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x005c)
+#define DRA7XX_CM_DIV_H14_DPLL_PER_OFFSET			0x0060
+#define DRA7XX_CM_DIV_H14_DPLL_PER				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0060)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET		0x0064
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET		0x0068
+#define DRA7XX_CM_CLKMODE_DPLL_USB_OFFSET			0x007c
+#define DRA7XX_CM_CLKMODE_DPLL_USB				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x007c)
+#define DRA7XX_CM_IDLEST_DPLL_USB_OFFSET			0x0080
+#define DRA7XX_CM_IDLEST_DPLL_USB				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0080)
+#define DRA7XX_CM_AUTOIDLE_DPLL_USB_OFFSET			0x0084
+#define DRA7XX_CM_AUTOIDLE_DPLL_USB				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0084)
+#define DRA7XX_CM_CLKSEL_DPLL_USB_OFFSET			0x0088
+#define DRA7XX_CM_CLKSEL_DPLL_USB				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0088)
+#define DRA7XX_CM_DIV_M2_DPLL_USB_OFFSET			0x008c
+#define DRA7XX_CM_DIV_M2_DPLL_USB				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x008c)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET		0x00a4
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET		0x00a8
+#define DRA7XX_CM_CLKDCOLDO_DPLL_USB_OFFSET			0x00b0
+#define DRA7XX_CM_CLKDCOLDO_DPLL_USB				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x00b0)
+#define DRA7XX_CM_CLKMODE_DPLL_PCIE_REF_OFFSET			0x00fc
+#define DRA7XX_CM_CLKMODE_DPLL_PCIE_REF				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x00fc)
+#define DRA7XX_CM_IDLEST_DPLL_PCIE_REF_OFFSET			0x0100
+#define DRA7XX_CM_IDLEST_DPLL_PCIE_REF				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0100)
+#define DRA7XX_CM_AUTOIDLE_DPLL_PCIE_REF_OFFSET			0x0104
+#define DRA7XX_CM_AUTOIDLE_DPLL_PCIE_REF			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0104)
+#define DRA7XX_CM_CLKSEL_DPLL_PCIE_REF_OFFSET			0x0108
+#define DRA7XX_CM_CLKSEL_DPLL_PCIE_REF				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0108)
+#define DRA7XX_CM_DIV_M2_DPLL_PCIE_REF_OFFSET			0x010c
+#define DRA7XX_CM_DIV_M2_DPLL_PCIE_REF				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x010c)
+#define DRA7XX_CM_SSC_DELTAMSTEP_DPLL_PCIE_REF_OFFSET		0x0110
+#define DRA7XX_CM_SSC_MODFREQDIV_DPLL_PCIE_REF_OFFSET		0x0114
+#define DRA7XX_CM_CLKMODE_APLL_PCIE_OFFSET			0x0118
+#define DRA7XX_CM_CLKMODE_APLL_PCIE				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0118)
+#define DRA7XX_CM_IDLEST_APLL_PCIE_OFFSET			0x011c
+#define DRA7XX_CM_IDLEST_APLL_PCIE				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x011c)
+#define DRA7XX_CM_DIV_M2_APLL_PCIE_OFFSET			0x0120
+#define DRA7XX_CM_DIV_M2_APLL_PCIE				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0120)
+#define DRA7XX_CM_CLKVCOLDO_APLL_PCIE_OFFSET			0x0124
+#define DRA7XX_CM_CLKVCOLDO_APLL_PCIE				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0124)
+
+/* CM_CORE.COREAON_CM_CORE register offsets */
+#define DRA7XX_CM_COREAON_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_COREAON_SMARTREFLEX_MPU_CLKCTRL_OFFSET	0x0028
+#define DRA7XX_CM_COREAON_SMARTREFLEX_MPU_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0028)
+#define DRA7XX_CM_COREAON_SMARTREFLEX_CORE_CLKCTRL_OFFSET	0x0038
+#define DRA7XX_CM_COREAON_SMARTREFLEX_CORE_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0038)
+#define DRA7XX_CM_COREAON_USB_PHY1_CORE_CLKCTRL_OFFSET		0x0040
+#define DRA7XX_CM_COREAON_USB_PHY1_CORE_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0040)
+#define DRA7XX_CM_COREAON_IO_SRCOMP_CLKCTRL_OFFSET		0x0050
+#define DRA7XX_CM_COREAON_IO_SRCOMP_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0050)
+#define DRA7XX_CM_COREAON_SMARTREFLEX_GPU_CLKCTRL_OFFSET	0x0058
+#define DRA7XX_CM_COREAON_SMARTREFLEX_GPU_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0058)
+#define DRA7XX_CM_COREAON_SMARTREFLEX_DSPEVE_CLKCTRL_OFFSET	0x0068
+#define DRA7XX_CM_COREAON_SMARTREFLEX_DSPEVE_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0068)
+#define DRA7XX_CM_COREAON_SMARTREFLEX_IVAHD_CLKCTRL_OFFSET	0x0078
+#define DRA7XX_CM_COREAON_SMARTREFLEX_IVAHD_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0078)
+#define DRA7XX_CM_COREAON_USB_PHY2_CORE_CLKCTRL_OFFSET		0x0088
+#define DRA7XX_CM_COREAON_USB_PHY2_CORE_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0088)
+#define DRA7XX_CM_COREAON_USB_PHY3_CORE_CLKCTRL_OFFSET		0x0098
+#define DRA7XX_CM_COREAON_USB_PHY3_CORE_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x0098)
+#define DRA7XX_CM_COREAON_DUMMY_MODULE1_CLKCTRL_OFFSET		0x00a0
+#define DRA7XX_CM_COREAON_DUMMY_MODULE1_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x00a0)
+#define DRA7XX_CM_COREAON_DUMMY_MODULE2_CLKCTRL_OFFSET		0x00b0
+#define DRA7XX_CM_COREAON_DUMMY_MODULE2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x00b0)
+#define DRA7XX_CM_COREAON_DUMMY_MODULE3_CLKCTRL_OFFSET		0x00c0
+#define DRA7XX_CM_COREAON_DUMMY_MODULE3_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x00c0)
+#define DRA7XX_CM_COREAON_DUMMY_MODULE4_CLKCTRL_OFFSET		0x00d0
+#define DRA7XX_CM_COREAON_DUMMY_MODULE4_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_COREAON_INST, 0x00d0)
+
+/* CM_CORE.CORE_CM_CORE register offsets */
+#define DRA7XX_CM_L3MAIN1_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_L3MAIN1_DYNAMICDEP_OFFSET			0x0008
+#define DRA7XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0020)
+#define DRA7XX_CM_L3MAIN1_GPMC_CLKCTRL_OFFSET			0x0028
+#define DRA7XX_CM_L3MAIN1_GPMC_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0028)
+#define DRA7XX_CM_L3MAIN1_MMU_EDMA_CLKCTRL_OFFSET		0x0030
+#define DRA7XX_CM_L3MAIN1_MMU_EDMA_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0030)
+#define DRA7XX_CM_L3MAIN1_OCMC_RAM1_CLKCTRL_OFFSET		0x0050
+#define DRA7XX_CM_L3MAIN1_OCMC_RAM1_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0050)
+#define DRA7XX_CM_L3MAIN1_OCMC_RAM2_CLKCTRL_OFFSET		0x0058
+#define DRA7XX_CM_L3MAIN1_OCMC_RAM2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0058)
+#define DRA7XX_CM_L3MAIN1_OCMC_RAM3_CLKCTRL_OFFSET		0x0060
+#define DRA7XX_CM_L3MAIN1_OCMC_RAM3_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0060)
+#define DRA7XX_CM_L3MAIN1_OCMC_ROM_CLKCTRL_OFFSET		0x0068
+#define DRA7XX_CM_L3MAIN1_OCMC_ROM_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0068)
+#define DRA7XX_CM_L3MAIN1_TPCC_CLKCTRL_OFFSET			0x0070
+#define DRA7XX_CM_L3MAIN1_TPCC_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0070)
+#define DRA7XX_CM_L3MAIN1_TPTC1_CLKCTRL_OFFSET			0x0078
+#define DRA7XX_CM_L3MAIN1_TPTC1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0078)
+#define DRA7XX_CM_L3MAIN1_TPTC2_CLKCTRL_OFFSET			0x0080
+#define DRA7XX_CM_L3MAIN1_TPTC2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0080)
+#define DRA7XX_CM_L3MAIN1_VCP1_CLKCTRL_OFFSET			0x0088
+#define DRA7XX_CM_L3MAIN1_VCP1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0088)
+#define DRA7XX_CM_L3MAIN1_VCP2_CLKCTRL_OFFSET			0x0090
+#define DRA7XX_CM_L3MAIN1_VCP2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0090)
+#define DRA7XX_CM_L3MAIN1_SPARE_CME_CLKCTRL_OFFSET		0x0098
+#define DRA7XX_CM_L3MAIN1_SPARE_CME_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0098)
+#define DRA7XX_CM_L3MAIN1_SPARE_HDMI_CLKCTRL_OFFSET		0x00a0
+#define DRA7XX_CM_L3MAIN1_SPARE_HDMI_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00a0)
+#define DRA7XX_CM_L3MAIN1_SPARE_ICM_CLKCTRL_OFFSET		0x00a8
+#define DRA7XX_CM_L3MAIN1_SPARE_ICM_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00a8)
+#define DRA7XX_CM_L3MAIN1_SPARE_IVA2_CLKCTRL_OFFSET		0x00b0
+#define DRA7XX_CM_L3MAIN1_SPARE_IVA2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00b0)
+#define DRA7XX_CM_L3MAIN1_SPARE_SATA2_CLKCTRL_OFFSET		0x00b8
+#define DRA7XX_CM_L3MAIN1_SPARE_SATA2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00b8)
+#define DRA7XX_CM_L3MAIN1_SPARE_UNKNOWN4_CLKCTRL_OFFSET		0x00c0
+#define DRA7XX_CM_L3MAIN1_SPARE_UNKNOWN4_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00c0)
+#define DRA7XX_CM_L3MAIN1_SPARE_UNKNOWN5_CLKCTRL_OFFSET		0x00c8
+#define DRA7XX_CM_L3MAIN1_SPARE_UNKNOWN5_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00c8)
+#define DRA7XX_CM_L3MAIN1_SPARE_UNKNOWN6_CLKCTRL_OFFSET		0x00d0
+#define DRA7XX_CM_L3MAIN1_SPARE_UNKNOWN6_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00d0)
+#define DRA7XX_CM_L3MAIN1_SPARE_VIDEOPLL1_CLKCTRL_OFFSET	0x00d8
+#define DRA7XX_CM_L3MAIN1_SPARE_VIDEOPLL1_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00d8)
+#define DRA7XX_CM_L3MAIN1_SPARE_VIDEOPLL2_CLKCTRL_OFFSET	0x00f0
+#define DRA7XX_CM_L3MAIN1_SPARE_VIDEOPLL2_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00f0)
+#define DRA7XX_CM_L3MAIN1_SPARE_VIDEOPLL3_CLKCTRL_OFFSET	0x00f8
+#define DRA7XX_CM_L3MAIN1_SPARE_VIDEOPLL3_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x00f8)
+#define DRA7XX_CM_IPU2_CLKSTCTRL_OFFSET				0x0200
+#define DRA7XX_CM_IPU2_STATICDEP_OFFSET				0x0204
+#define DRA7XX_CM_IPU2_DYNAMICDEP_OFFSET			0x0208
+#define DRA7XX_CM_IPU2_IPU2_CLKCTRL_OFFSET			0x0220
+#define DRA7XX_CM_IPU2_IPU2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0220)
+#define DRA7XX_CM_DMA_CLKSTCTRL_OFFSET				0x0300
+#define DRA7XX_CM_DMA_STATICDEP_OFFSET				0x0304
+#define DRA7XX_CM_DMA_DYNAMICDEP_OFFSET				0x0308
+#define DRA7XX_CM_DMA_DMA_SYSTEM_CLKCTRL_OFFSET			0x0320
+#define DRA7XX_CM_DMA_DMA_SYSTEM_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0320)
+#define DRA7XX_CM_EMIF_CLKSTCTRL_OFFSET				0x0400
+#define DRA7XX_CM_EMIF_DMM_CLKCTRL_OFFSET			0x0420
+#define DRA7XX_CM_EMIF_DMM_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0420)
+#define DRA7XX_CM_EMIF_EMIF_OCP_FW_CLKCTRL_OFFSET		0x0428
+#define DRA7XX_CM_EMIF_EMIF_OCP_FW_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0428)
+#define DRA7XX_CM_EMIF_EMIF1_CLKCTRL_OFFSET			0x0430
+#define DRA7XX_CM_EMIF_EMIF1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0430)
+#define DRA7XX_CM_EMIF_EMIF2_CLKCTRL_OFFSET			0x0438
+#define DRA7XX_CM_EMIF_EMIF2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0438)
+#define DRA7XX_CM_EMIF_EMIF_DLL_CLKCTRL_OFFSET			0x0440
+#define DRA7XX_CM_EMIF_EMIF_DLL_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0440)
+#define DRA7XX_CM_ATL_ATL_CLKCTRL_OFFSET			0x0500
+#define DRA7XX_CM_ATL_ATL_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0500)
+#define DRA7XX_CM_ATL_CLKSTCTRL_OFFSET				0x0520
+#define DRA7XX_CM_L4CFG_CLKSTCTRL_OFFSET			0x0600
+#define DRA7XX_CM_L4CFG_DYNAMICDEP_OFFSET			0x0608
+#define DRA7XX_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET			0x0620
+#define DRA7XX_CM_L4CFG_L4_CFG_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0620)
+#define DRA7XX_CM_L4CFG_SPINLOCK_CLKCTRL_OFFSET			0x0628
+#define DRA7XX_CM_L4CFG_SPINLOCK_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0628)
+#define DRA7XX_CM_L4CFG_MAILBOX1_CLKCTRL_OFFSET			0x0630
+#define DRA7XX_CM_L4CFG_MAILBOX1_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0630)
+#define DRA7XX_CM_L4CFG_SAR_ROM_CLKCTRL_OFFSET			0x0638
+#define DRA7XX_CM_L4CFG_SAR_ROM_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0638)
+#define DRA7XX_CM_L4CFG_OCP2SCP2_CLKCTRL_OFFSET			0x0640
+#define DRA7XX_CM_L4CFG_OCP2SCP2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0640)
+#define DRA7XX_CM_L4CFG_MAILBOX2_CLKCTRL_OFFSET			0x0648
+#define DRA7XX_CM_L4CFG_MAILBOX2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0648)
+#define DRA7XX_CM_L4CFG_MAILBOX3_CLKCTRL_OFFSET			0x0650
+#define DRA7XX_CM_L4CFG_MAILBOX3_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0650)
+#define DRA7XX_CM_L4CFG_MAILBOX4_CLKCTRL_OFFSET			0x0658
+#define DRA7XX_CM_L4CFG_MAILBOX4_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0658)
+#define DRA7XX_CM_L4CFG_MAILBOX5_CLKCTRL_OFFSET			0x0660
+#define DRA7XX_CM_L4CFG_MAILBOX5_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0660)
+#define DRA7XX_CM_L4CFG_MAILBOX6_CLKCTRL_OFFSET			0x0668
+#define DRA7XX_CM_L4CFG_MAILBOX6_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0668)
+#define DRA7XX_CM_L4CFG_MAILBOX7_CLKCTRL_OFFSET			0x0670
+#define DRA7XX_CM_L4CFG_MAILBOX7_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0670)
+#define DRA7XX_CM_L4CFG_MAILBOX8_CLKCTRL_OFFSET			0x0678
+#define DRA7XX_CM_L4CFG_MAILBOX8_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0678)
+#define DRA7XX_CM_L4CFG_MAILBOX9_CLKCTRL_OFFSET			0x0680
+#define DRA7XX_CM_L4CFG_MAILBOX9_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0680)
+#define DRA7XX_CM_L4CFG_MAILBOX10_CLKCTRL_OFFSET		0x0688
+#define DRA7XX_CM_L4CFG_MAILBOX10_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0688)
+#define DRA7XX_CM_L4CFG_MAILBOX11_CLKCTRL_OFFSET		0x0690
+#define DRA7XX_CM_L4CFG_MAILBOX11_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0690)
+#define DRA7XX_CM_L4CFG_MAILBOX12_CLKCTRL_OFFSET		0x0698
+#define DRA7XX_CM_L4CFG_MAILBOX12_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0698)
+#define DRA7XX_CM_L4CFG_MAILBOX13_CLKCTRL_OFFSET		0x06a0
+#define DRA7XX_CM_L4CFG_MAILBOX13_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x06a0)
+#define DRA7XX_CM_L4CFG_SPARE_SMARTREFLEX_RTC_CLKCTRL_OFFSET	0x06a8
+#define DRA7XX_CM_L4CFG_SPARE_SMARTREFLEX_RTC_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x06a8)
+#define DRA7XX_CM_L4CFG_SPARE_SMARTREFLEX_SDRAM_CLKCTRL_OFFSET	0x06b0
+#define DRA7XX_CM_L4CFG_SPARE_SMARTREFLEX_SDRAM_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x06b0)
+#define DRA7XX_CM_L4CFG_SPARE_SMARTREFLEX_WKUP_CLKCTRL_OFFSET	0x06b8
+#define DRA7XX_CM_L4CFG_SPARE_SMARTREFLEX_WKUP_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x06b8)
+#define DRA7XX_CM_L4CFG_IO_DELAY_BLOCK_CLKCTRL_OFFSET		0x06c0
+#define DRA7XX_CM_L4CFG_IO_DELAY_BLOCK_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x06c0)
+#define DRA7XX_CM_L3INSTR_CLKSTCTRL_OFFSET			0x0700
+#define DRA7XX_CM_L3INSTR_L3_MAIN_2_CLKCTRL_OFFSET		0x0720
+#define DRA7XX_CM_L3INSTR_L3_MAIN_2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0720)
+#define DRA7XX_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET		0x0728
+#define DRA7XX_CM_L3INSTR_L3_INSTR_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0728)
+#define DRA7XX_CM_L3INSTR_OCP_WP_NOC_CLKCTRL_OFFSET		0x0740
+#define DRA7XX_CM_L3INSTR_OCP_WP_NOC_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0740)
+#define DRA7XX_CM_L3INSTR_DLL_AGING_CLKCTRL_OFFSET		0x0748
+#define DRA7XX_CM_L3INSTR_DLL_AGING_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0748)
+#define DRA7XX_CM_L3INSTR_CTRL_MODULE_BANDGAP_CLKCTRL_OFFSET	0x0750
+#define DRA7XX_CM_L3INSTR_CTRL_MODULE_BANDGAP_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CORE_INST, 0x0750)
+
+/* CM_CORE.IVA_CM_CORE register offsets */
+#define DRA7XX_CM_IVA_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_IVA_STATICDEP_OFFSET				0x0004
+#define DRA7XX_CM_IVA_DYNAMICDEP_OFFSET				0x0008
+#define DRA7XX_CM_IVA_IVA_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_IVA_IVA_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_IVA_INST, 0x0020)
+#define DRA7XX_CM_IVA_SL2_CLKCTRL_OFFSET			0x0028
+#define DRA7XX_CM_IVA_SL2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_IVA_INST, 0x0028)
+
+/* CM_CORE.CAM_CM_CORE register offsets */
+#define DRA7XX_CM_CAM_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_CAM_STATICDEP_OFFSET				0x0004
+#define DRA7XX_CM_CAM_VIP1_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_CAM_VIP1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CAM_INST, 0x0020)
+#define DRA7XX_CM_CAM_VIP2_CLKCTRL_OFFSET			0x0028
+#define DRA7XX_CM_CAM_VIP2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CAM_INST, 0x0028)
+#define DRA7XX_CM_CAM_VIP3_CLKCTRL_OFFSET			0x0030
+#define DRA7XX_CM_CAM_VIP3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CAM_INST, 0x0030)
+#define DRA7XX_CM_CAM_LVDSRX_CLKCTRL_OFFSET			0x0038
+#define DRA7XX_CM_CAM_LVDSRX_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CAM_INST, 0x0038)
+#define DRA7XX_CM_CAM_CSI1_CLKCTRL_OFFSET			0x0040
+#define DRA7XX_CM_CAM_CSI1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CAM_INST, 0x0040)
+#define DRA7XX_CM_CAM_CSI2_CLKCTRL_OFFSET			0x0048
+#define DRA7XX_CM_CAM_CSI2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CAM_INST, 0x0048)
+
+/* CM_CORE.DSS_CM_CORE register offsets */
+#define DRA7XX_CM_DSS_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_DSS_STATICDEP_OFFSET				0x0004
+#define DRA7XX_CM_DSS_DYNAMICDEP_OFFSET				0x0008
+#define DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_DSS_DSS_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_DSS_INST, 0x0020)
+#define DRA7XX_CM_DSS_BB2D_CLKCTRL_OFFSET			0x0030
+#define DRA7XX_CM_DSS_BB2D_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_DSS_INST, 0x0030)
+#define DRA7XX_CM_DSS_SDVENC_CLKCTRL_OFFSET			0x003c
+#define DRA7XX_CM_DSS_SDVENC_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_DSS_INST, 0x003c)
+
+/* CM_CORE.GPU_CM_CORE register offsets */
+#define DRA7XX_CM_GPU_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_GPU_STATICDEP_OFFSET				0x0004
+#define DRA7XX_CM_GPU_DYNAMICDEP_OFFSET				0x0008
+#define DRA7XX_CM_GPU_GPU_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_GPU_GPU_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_GPU_INST, 0x0020)
+
+/* CM_CORE.L3INIT_CM_CORE register offsets */
+#define DRA7XX_CM_L3INIT_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_L3INIT_STATICDEP_OFFSET			0x0004
+#define DRA7XX_CM_L3INIT_DYNAMICDEP_OFFSET			0x0008
+#define DRA7XX_CM_L3INIT_MMC1_CLKCTRL_OFFSET			0x0028
+#define DRA7XX_CM_L3INIT_MMC1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0028)
+#define DRA7XX_CM_L3INIT_MMC2_CLKCTRL_OFFSET			0x0030
+#define DRA7XX_CM_L3INIT_MMC2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0030)
+#define DRA7XX_CM_L3INIT_USB_OTG_SS2_CLKCTRL_OFFSET		0x0040
+#define DRA7XX_CM_L3INIT_USB_OTG_SS2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0040)
+#define DRA7XX_CM_L3INIT_USB_OTG_SS3_CLKCTRL_OFFSET		0x0048
+#define DRA7XX_CM_L3INIT_USB_OTG_SS3_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0048)
+#define DRA7XX_CM_L3INIT_USB_OTG_SS4_CLKCTRL_OFFSET		0x0050
+#define DRA7XX_CM_L3INIT_USB_OTG_SS4_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0050)
+#define DRA7XX_CM_L3INIT_MLB_SS_CLKCTRL_OFFSET			0x0058
+#define DRA7XX_CM_L3INIT_MLB_SS_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0058)
+#define DRA7XX_CM_L3INIT_IEEE1500_2_OCP_CLKCTRL_OFFSET		0x0078
+#define DRA7XX_CM_L3INIT_IEEE1500_2_OCP_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0078)
+#define DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET			0x0088
+#define DRA7XX_CM_L3INIT_SATA_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x0088)
+#define DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET				0x00a0
+#define DRA7XX_CM_PCIE_STATICDEP_OFFSET				0x00a4
+#define DRA7XX_CM_GMAC_CLKSTCTRL_OFFSET				0x00c0
+#define DRA7XX_CM_GMAC_STATICDEP_OFFSET				0x00c4
+#define DRA7XX_CM_GMAC_DYNAMICDEP_OFFSET			0x00c8
+#define DRA7XX_CM_GMAC_GMAC_CLKCTRL_OFFSET			0x00d0
+#define DRA7XX_CM_GMAC_GMAC_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x00d0)
+#define DRA7XX_CM_L3INIT_OCP2SCP1_CLKCTRL_OFFSET		0x00e0
+#define DRA7XX_CM_L3INIT_OCP2SCP1_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x00e0)
+#define DRA7XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET		0x00e8
+#define DRA7XX_CM_L3INIT_OCP2SCP3_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x00e8)
+#define DRA7XX_CM_L3INIT_USB_OTG_SS1_CLKCTRL_OFFSET		0x00f0
+#define DRA7XX_CM_L3INIT_USB_OTG_SS1_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L3INIT_INST, 0x00f0)
+
+/* CM_CORE.CUSTEFUSE_CM_CORE register offsets */
+#define DRA7XX_CM_CUSTEFUSE_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_CUSTEFUSE_EFUSE_CTRL_CUST_CLKCTRL_OFFSET	0x0020
+#define DRA7XX_CM_CUSTEFUSE_EFUSE_CTRL_CUST_CLKCTRL		DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CUSTEFUSE_INST, 0x0020)
+
+/* CM_CORE.L4PER_CM_CORE register offsets */
+#define DRA7XX_CM_L4PER_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_L4PER_DYNAMICDEP_OFFSET			0x0008
+#define DRA7XX_CM_L4PER2_L4_PER2_CLKCTRL_OFFSET			0x000c
+#define DRA7XX_CM_L4PER2_L4_PER2_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x000c)
+#define DRA7XX_CM_L4PER3_L4_PER3_CLKCTRL_OFFSET			0x0014
+#define DRA7XX_CM_L4PER3_L4_PER3_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0014)
+#define DRA7XX_CM_L4PER2_PRUSS1_CLKCTRL_OFFSET			0x0018
+#define DRA7XX_CM_L4PER2_PRUSS1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0018)
+#define DRA7XX_CM_L4PER2_PRUSS2_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_L4PER2_PRUSS2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0020)
+#define DRA7XX_CM_L4PER_TIMER10_CLKCTRL_OFFSET			0x0028
+#define DRA7XX_CM_L4PER_TIMER10_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0028)
+#define DRA7XX_CM_L4PER_TIMER11_CLKCTRL_OFFSET			0x0030
+#define DRA7XX_CM_L4PER_TIMER11_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0030)
+#define DRA7XX_CM_L4PER_TIMER2_CLKCTRL_OFFSET			0x0038
+#define DRA7XX_CM_L4PER_TIMER2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0038)
+#define DRA7XX_CM_L4PER_TIMER3_CLKCTRL_OFFSET			0x0040
+#define DRA7XX_CM_L4PER_TIMER3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0040)
+#define DRA7XX_CM_L4PER_TIMER4_CLKCTRL_OFFSET			0x0048
+#define DRA7XX_CM_L4PER_TIMER4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0048)
+#define DRA7XX_CM_L4PER_TIMER9_CLKCTRL_OFFSET			0x0050
+#define DRA7XX_CM_L4PER_TIMER9_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0050)
+#define DRA7XX_CM_L4PER_ELM_CLKCTRL_OFFSET			0x0058
+#define DRA7XX_CM_L4PER_ELM_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0058)
+#define DRA7XX_CM_L4PER_GPIO2_CLKCTRL_OFFSET			0x0060
+#define DRA7XX_CM_L4PER_GPIO2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0060)
+#define DRA7XX_CM_L4PER_GPIO3_CLKCTRL_OFFSET			0x0068
+#define DRA7XX_CM_L4PER_GPIO3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0068)
+#define DRA7XX_CM_L4PER_GPIO4_CLKCTRL_OFFSET			0x0070
+#define DRA7XX_CM_L4PER_GPIO4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0070)
+#define DRA7XX_CM_L4PER_GPIO5_CLKCTRL_OFFSET			0x0078
+#define DRA7XX_CM_L4PER_GPIO5_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0078)
+#define DRA7XX_CM_L4PER_GPIO6_CLKCTRL_OFFSET			0x0080
+#define DRA7XX_CM_L4PER_GPIO6_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0080)
+#define DRA7XX_CM_L4PER_HDQ1W_CLKCTRL_OFFSET			0x0088
+#define DRA7XX_CM_L4PER_HDQ1W_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0088)
+#define DRA7XX_CM_L4PER2_PWMSS2_CLKCTRL_OFFSET			0x0090
+#define DRA7XX_CM_L4PER2_PWMSS2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0090)
+#define DRA7XX_CM_L4PER2_PWMSS3_CLKCTRL_OFFSET			0x0098
+#define DRA7XX_CM_L4PER2_PWMSS3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0098)
+#define DRA7XX_CM_L4PER_I2C1_CLKCTRL_OFFSET			0x00a0
+#define DRA7XX_CM_L4PER_I2C1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00a0)
+#define DRA7XX_CM_L4PER_I2C2_CLKCTRL_OFFSET			0x00a8
+#define DRA7XX_CM_L4PER_I2C2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00a8)
+#define DRA7XX_CM_L4PER_I2C3_CLKCTRL_OFFSET			0x00b0
+#define DRA7XX_CM_L4PER_I2C3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00b0)
+#define DRA7XX_CM_L4PER_I2C4_CLKCTRL_OFFSET			0x00b8
+#define DRA7XX_CM_L4PER_I2C4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00b8)
+#define DRA7XX_CM_L4PER_L4_PER1_CLKCTRL_OFFSET			0x00c0
+#define DRA7XX_CM_L4PER_L4_PER1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00c0)
+#define DRA7XX_CM_L4PER2_PWMSS1_CLKCTRL_OFFSET			0x00c4
+#define DRA7XX_CM_L4PER2_PWMSS1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00c4)
+#define DRA7XX_CM_L4PER3_TIMER13_CLKCTRL_OFFSET			0x00c8
+#define DRA7XX_CM_L4PER3_TIMER13_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00c8)
+#define DRA7XX_CM_L4PER3_TIMER14_CLKCTRL_OFFSET			0x00d0
+#define DRA7XX_CM_L4PER3_TIMER14_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00d0)
+#define DRA7XX_CM_L4PER3_TIMER15_CLKCTRL_OFFSET			0x00d8
+#define DRA7XX_CM_L4PER3_TIMER15_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00d8)
+#define DRA7XX_CM_L4PER_MCSPI1_CLKCTRL_OFFSET			0x00f0
+#define DRA7XX_CM_L4PER_MCSPI1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00f0)
+#define DRA7XX_CM_L4PER_MCSPI2_CLKCTRL_OFFSET			0x00f8
+#define DRA7XX_CM_L4PER_MCSPI2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x00f8)
+#define DRA7XX_CM_L4PER_MCSPI3_CLKCTRL_OFFSET			0x0100
+#define DRA7XX_CM_L4PER_MCSPI3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0100)
+#define DRA7XX_CM_L4PER_MCSPI4_CLKCTRL_OFFSET			0x0108
+#define DRA7XX_CM_L4PER_MCSPI4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0108)
+#define DRA7XX_CM_L4PER_GPIO7_CLKCTRL_OFFSET			0x0110
+#define DRA7XX_CM_L4PER_GPIO7_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0110)
+#define DRA7XX_CM_L4PER_GPIO8_CLKCTRL_OFFSET			0x0118
+#define DRA7XX_CM_L4PER_GPIO8_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0118)
+#define DRA7XX_CM_L4PER_MMC3_CLKCTRL_OFFSET			0x0120
+#define DRA7XX_CM_L4PER_MMC3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0120)
+#define DRA7XX_CM_L4PER_MMC4_CLKCTRL_OFFSET			0x0128
+#define DRA7XX_CM_L4PER_MMC4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0128)
+#define DRA7XX_CM_L4PER3_TIMER16_CLKCTRL_OFFSET			0x0130
+#define DRA7XX_CM_L4PER3_TIMER16_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0130)
+#define DRA7XX_CM_L4PER2_QSPI_CLKCTRL_OFFSET			0x0138
+#define DRA7XX_CM_L4PER2_QSPI_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0138)
+#define DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET			0x0140
+#define DRA7XX_CM_L4PER_UART1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0140)
+#define DRA7XX_CM_L4PER_UART2_CLKCTRL_OFFSET			0x0148
+#define DRA7XX_CM_L4PER_UART2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0148)
+#define DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET			0x0150
+#define DRA7XX_CM_L4PER_UART3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0150)
+#define DRA7XX_CM_L4PER_UART4_CLKCTRL_OFFSET			0x0158
+#define DRA7XX_CM_L4PER_UART4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0158)
+#define DRA7XX_CM_L4PER2_MCASP2_CLKCTRL_OFFSET			0x0160
+#define DRA7XX_CM_L4PER2_MCASP2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0160)
+#define DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET			0x0168
+#define DRA7XX_CM_L4PER2_MCASP3_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0168)
+#define DRA7XX_CM_L4PER_UART5_CLKCTRL_OFFSET			0x0170
+#define DRA7XX_CM_L4PER_UART5_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0170)
+#define DRA7XX_CM_L4PER2_MCASP5_CLKCTRL_OFFSET			0x0178
+#define DRA7XX_CM_L4PER2_MCASP5_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0178)
+#define DRA7XX_CM_L4SEC_CLKSTCTRL_OFFSET			0x0180
+#define DRA7XX_CM_L4SEC_STATICDEP_OFFSET			0x0184
+#define DRA7XX_CM_L4SEC_DYNAMICDEP_OFFSET			0x0188
+#define DRA7XX_CM_L4PER2_MCASP8_CLKCTRL_OFFSET			0x0190
+#define DRA7XX_CM_L4PER2_MCASP8_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0190)
+#define DRA7XX_CM_L4PER2_MCASP4_CLKCTRL_OFFSET			0x0198
+#define DRA7XX_CM_L4PER2_MCASP4_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0198)
+#define DRA7XX_CM_L4SEC_AES1_CLKCTRL_OFFSET			0x01a0
+#define DRA7XX_CM_L4SEC_AES1_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01a0)
+#define DRA7XX_CM_L4SEC_AES2_CLKCTRL_OFFSET			0x01a8
+#define DRA7XX_CM_L4SEC_AES2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01a8)
+#define DRA7XX_CM_L4SEC_DES3DES_CLKCTRL_OFFSET			0x01b0
+#define DRA7XX_CM_L4SEC_DES3DES_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01b0)
+#define DRA7XX_CM_L4SEC_FPKA_CLKCTRL_OFFSET			0x01b8
+#define DRA7XX_CM_L4SEC_FPKA_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01b8)
+#define DRA7XX_CM_L4SEC_RNG_CLKCTRL_OFFSET			0x01c0
+#define DRA7XX_CM_L4SEC_RNG_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01c0)
+#define DRA7XX_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET			0x01c8
+#define DRA7XX_CM_L4SEC_SHA2MD51_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01c8)
+#define DRA7XX_CM_L4PER2_UART7_CLKCTRL_OFFSET			0x01d0
+#define DRA7XX_CM_L4PER2_UART7_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01d0)
+#define DRA7XX_CM_L4SEC_DMA_CRYPTO_CLKCTRL_OFFSET		0x01d8
+#define DRA7XX_CM_L4SEC_DMA_CRYPTO_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01d8)
+#define DRA7XX_CM_L4PER2_UART8_CLKCTRL_OFFSET			0x01e0
+#define DRA7XX_CM_L4PER2_UART8_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01e0)
+#define DRA7XX_CM_L4PER2_UART9_CLKCTRL_OFFSET			0x01e8
+#define DRA7XX_CM_L4PER2_UART9_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01e8)
+#define DRA7XX_CM_L4PER2_DCAN2_CLKCTRL_OFFSET			0x01f0
+#define DRA7XX_CM_L4PER2_DCAN2_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01f0)
+#define DRA7XX_CM_L4SEC_SHA2MD52_CLKCTRL_OFFSET			0x01f8
+#define DRA7XX_CM_L4SEC_SHA2MD52_CLKCTRL			DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x01f8)
+#define DRA7XX_CM_L4PER2_CLKSTCTRL_OFFSET			0x01fc
+#define DRA7XX_CM_L4PER2_DYNAMICDEP_OFFSET			0x0200
+#define DRA7XX_CM_L4PER2_MCASP6_CLKCTRL_OFFSET			0x0204
+#define DRA7XX_CM_L4PER2_MCASP6_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0204)
+#define DRA7XX_CM_L4PER2_MCASP7_CLKCTRL_OFFSET			0x0208
+#define DRA7XX_CM_L4PER2_MCASP7_CLKCTRL				DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_L4PER_INST, 0x0208)
+#define DRA7XX_CM_L4PER2_STATICDEP_OFFSET			0x020c
+#define DRA7XX_CM_L4PER3_CLKSTCTRL_OFFSET			0x0210
+#define DRA7XX_CM_L4PER3_DYNAMICDEP_OFFSET			0x0214
+
+#endif
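
[Note: the generated *_REGADDR macros in cm1_7xx.h and cm2_7xx.h above all compose the same three values: the module base address, the instance offset, and the register offset within the instance, mapped through OMAP2_L4_IO_ADDRESS(). A worked expansion using only defines from this header:

    DRA7XX_CM_CLKSEL_USB_60MHZ
        = DRA7XX_CM_CORE_REGADDR(DRA7XX_CM_CORE_CKGEN_INST, 0x0000)
        = OMAP2_L4_IO_ADDRESS(0x4a008000 + 0x0104 + 0x0000)
        = OMAP2_L4_IO_ADDRESS(0x4a008104)
]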
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3656b80..ff2113c 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -665,6 +665,11 @@
 	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
 	omap_prm_base_init();
 	omap_cm_base_init();
+	omap44xx_prm_init();
+	dra7xx_powerdomains_init();
+	dra7xx_clockdomains_init();
+	dra7xx_hwmod_init();
+	omap_hwmod_init_postsetup();
 }
 #endif
 
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b4ecd2c..d9ee0ff 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1405,7 +1405,9 @@
 	    (sf & SYSC_HAS_CLOCKACTIVITY))
 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-	_write_sysconfig(v, oh);
+	/* If the cached value is the same as the new value, skip the write */
+	if (oh->_sysc_cache != v)
+		_write_sysconfig(v, oh);
 
 	/*
 	 * Set the autoidle bit only after setting the smartidle bit
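
[Note: the omap_hwmod.c hunk above makes the SYSCONFIG write conditional. The hwmod core keeps the last value it wrote in oh->_sysc_cache (refreshed by _write_sysconfig() on every real write, which is what makes the comparison safe), so when the recomputed value matches, the comparatively expensive interconnect register write can be skipped. A minimal standalone sketch of the pattern, with illustrative names rather than the kernel's own:

    /* Skip the MMIO write when the cached value is already current. */
    struct mod_ctx {
    	u32		sysc_cache;	/* last value written to SYSCONFIG */
    	void __iomem	*sysc_va;	/* mapped SYSCONFIG register */
    };

    static void write_sysc_cached(u32 v, struct mod_ctx *m)
    {
    	if (m->sysc_cache == v)
    		return;			/* unchanged: nothing to write */
    	m->sysc_cache = v;		/* refresh cache on every real write */
    	writel_relaxed(v, m->sysc_va);
    }
]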
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index e1482a9..d02acf9 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -751,6 +751,7 @@
 extern int omap44xx_hwmod_init(void);
 extern int omap54xx_hwmod_init(void);
 extern int am33xx_hwmod_init(void);
+extern int dra7xx_hwmod_init(void);
 
 extern int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois);
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index eb2f3b9..215894f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -325,7 +325,6 @@
  *
  *    - cEFUSE (doesn't fall under any ocp_if)
  *    - clkdiv32k
- *    - debugss
  *    - ocp watch point
  */
 #if 0
@@ -369,27 +368,6 @@
 	},
 };
 
-/*
- * 'debugss' class
- * debug sub system
- */
-static struct omap_hwmod_class am33xx_debugss_hwmod_class = {
-	.name		= "debugss",
-};
-
-static struct omap_hwmod am33xx_debugss_hwmod = {
-	.name		= "debugss",
-	.class		= &am33xx_debugss_hwmod_class,
-	.clkdm_name	= "l3_aon_clkdm",
-	.main_clk	= "debugss_ick",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM33XX_CM_WKUP_DEBUGSS_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
 /* ocpwp */
 static struct omap_hwmod_class am33xx_ocpwp_hwmod_class = {
 	.name		= "ocpwp",
@@ -482,6 +460,34 @@
 	},
 };
 
+/*
+ * 'debugss' class
+ * debug sub system
+ */
+static struct omap_hwmod_opt_clk debugss_opt_clks[] = {
+	{ .role = "dbg_sysclk", .clk = "dbg_sysclk_ck" },
+	{ .role = "dbg_clka", .clk = "dbg_clka_ck" },
+};
+
+static struct omap_hwmod_class am33xx_debugss_hwmod_class = {
+	.name		= "debugss",
+};
+
+static struct omap_hwmod am33xx_debugss_hwmod = {
+	.name		= "debugss",
+	.class		= &am33xx_debugss_hwmod_class,
+	.clkdm_name	= "l3_aon_clkdm",
+	.main_clk	= "trace_clk_div_ck",
+	.prcm		= {
+		.omap4	= {
+			.clkctrl_offs	= AM33XX_CM_WKUP_DEBUGSS_CLKCTRL_OFFSET,
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= debugss_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(debugss_opt_clks),
+};
+
 /* 'smartreflex' class */
 static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {
 	.name		= "smartreflex",
@@ -1796,6 +1802,24 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l3_main -> debugss */
+static struct omap_hwmod_addr_space am33xx_debugss_addrs[] = {
+	{
+		.pa_start	= 0x4b000000,
+		.pa_end		= 0x4b000000 + SZ_16M - 1,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+static struct omap_hwmod_ocp_if am33xx_l3_main__debugss = {
+	.master		= &am33xx_l3_main_hwmod,
+	.slave		= &am33xx_debugss_hwmod,
+	.clk		= "dpll_core_m4_ck",
+	.addr		= am33xx_debugss_addrs,
+	.user		= OCP_USER_MPU,
+};
+
 /* l4 wkup -> smartreflex0 */
 static struct omap_hwmod_ocp_if am33xx_l4_wkup__smartreflex0 = {
 	.master		= &am33xx_l4_wkup_hwmod,
@@ -2470,6 +2494,7 @@
 	&am33xx_pruss__l3_main,
 	&am33xx_wkup_m3__l4_wkup,
 	&am33xx_gfx__l3_main,
+	&am33xx_l3_main__debugss,
 	&am33xx_l4_wkup__wkup_m3,
 	&am33xx_l4_wkup__control,
 	&am33xx_l4_wkup__smartreflex0,
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index b4d0474..cde4155 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -740,6 +740,39 @@
 };
 
 /*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors using a
+ * queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_mailbox_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap54xx_mailbox_hwmod_class = {
+	.name	= "mailbox",
+	.sysc	= &omap54xx_mailbox_sysc,
+};
+
+/* mailbox */
+static struct omap_hwmod omap54xx_mailbox_hwmod = {
+	.name		= "mailbox",
+	.class		= &omap54xx_mailbox_hwmod_class,
+	.clkdm_name	= "l4cfg_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = OMAP54XX_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET,
+			.context_offs = OMAP54XX_RM_L4CFG_MAILBOX_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
  * 'mcbsp' class
  * multi channel buffered serial port controller
  */
@@ -1807,6 +1840,14 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_cfg -> mailbox */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mailbox = {
+	.master		= &omap54xx_l4_cfg_hwmod,
+	.slave		= &omap54xx_mailbox_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_abe -> mcbsp1 */
 static struct omap_hwmod_ocp_if omap54xx_l4_abe__mcbsp1 = {
 	.master		= &omap54xx_l4_abe_hwmod,
@@ -2107,6 +2148,7 @@
 	&omap54xx_l4_per__i2c4,
 	&omap54xx_l4_per__i2c5,
 	&omap54xx_l4_wkup__kbd,
+	&omap54xx_l4_cfg__mailbox,
 	&omap54xx_l4_abe__mcbsp1,
 	&omap54xx_l4_abe__mcbsp2,
 	&omap54xx_l4_abe__mcbsp3,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
new file mode 100644
index 0000000..db32d53
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -0,0 +1,2724 @@
+/*
+ * Hardware modules present on the DRA7xx chips
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Paul Walmsley
+ * Benoit Cousson
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/platform_data/gpio-omap.h>
+#include <linux/power/smartreflex.h>
+#include <linux/i2c-omap.h>
+
+#include <linux/omap-dma.h>
+#include <linux/platform_data/spi-omap2-mcspi.h>
+#include <linux/platform_data/asoc-ti-mcbsp.h>
+#include <plat/dmtimer.h>
+
+#include "omap_hwmod.h"
+#include "omap_hwmod_common_data.h"
+#include "cm1_7xx.h"
+#include "cm2_7xx.h"
+#include "prm7xx.h"
+#include "i2c.h"
+#include "mmc.h"
+#include "wd_timer.h"
+
+/* Base offset for all DRA7XX interrupts external to MPUSS */
+#define DRA7XX_IRQ_GIC_START	32
+
+/* Base offset for all DRA7XX dma requests */
+#define DRA7XX_DMA_REQ_START	1
+
+/*
+ * IP blocks
+ */
+
+/*
+ * 'l3' class
+ * instance(s): l3_instr, l3_main_1, l3_main_2
+ */
+static struct omap_hwmod_class dra7xx_l3_hwmod_class = {
+	.name	= "l3",
+};
+
+/* l3_instr */
+static struct omap_hwmod dra7xx_l3_instr_hwmod = {
+	.name		= "l3_instr",
+	.class		= &dra7xx_l3_hwmod_class,
+	.clkdm_name	= "l3instr_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/* l3_main_1 */
+static struct omap_hwmod dra7xx_l3_main_1_hwmod = {
+	.name		= "l3_main_1",
+	.class		= &dra7xx_l3_hwmod_class,
+	.clkdm_name	= "l3main1_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3MAIN1_L3_MAIN_1_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/* l3_main_2 */
+static struct omap_hwmod dra7xx_l3_main_2_hwmod = {
+	.name		= "l3_main_2",
+	.class		= &dra7xx_l3_hwmod_class,
+	.clkdm_name	= "l3instr_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INSTR_L3_MAIN_2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INSTR_L3_MAIN_2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/*
+ * 'l4' class
+ * instance(s): l4_cfg, l4_per1, l4_per2, l4_per3, l4_wkup
+ */
+static struct omap_hwmod_class dra7xx_l4_hwmod_class = {
+	.name	= "l4",
+};
+
+/* l4_cfg */
+static struct omap_hwmod dra7xx_l4_cfg_hwmod = {
+	.name		= "l4_cfg",
+	.class		= &dra7xx_l4_hwmod_class,
+	.clkdm_name	= "l4cfg_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4CFG_L4_CFG_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/* l4_per1 */
+static struct omap_hwmod dra7xx_l4_per1_hwmod = {
+	.name		= "l4_per1",
+	.class		= &dra7xx_l4_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_L4_PER1_CLKCTRL_OFFSET,
+			.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
+		},
+	},
+};
+
+/* l4_per2 */
+static struct omap_hwmod dra7xx_l4_per2_hwmod = {
+	.name		= "l4_per2",
+	.class		= &dra7xx_l4_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_L4_PER2_CLKCTRL_OFFSET,
+			.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
+		},
+	},
+};
+
+/* l4_per3 */
+static struct omap_hwmod dra7xx_l4_per3_hwmod = {
+	.name		= "l4_per3",
+	.class		= &dra7xx_l4_hwmod_class,
+	.clkdm_name	= "l4per3_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER3_L4_PER3_CLKCTRL_OFFSET,
+			.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
+		},
+	},
+};
+
+/* l4_wkup */
+static struct omap_hwmod dra7xx_l4_wkup_hwmod = {
+	.name		= "l4_wkup",
+	.class		= &dra7xx_l4_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_L4_WKUP_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_L4_WKUP_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
+ * 'atl' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_atl_hwmod_class = {
+	.name	= "atl",
+};
+
+/* atl */
+static struct omap_hwmod dra7xx_atl_hwmod = {
+	.name		= "atl",
+	.class		= &dra7xx_atl_hwmod_class,
+	.clkdm_name	= "atl_clkdm",
+	.main_clk	= "atl_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_ATL_ATL_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_ATL_ATL_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'bb2d' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_bb2d_hwmod_class = {
+	.name	= "bb2d",
+};
+
+/* bb2d */
+static struct omap_hwmod dra7xx_bb2d_hwmod = {
+	.name		= "bb2d",
+	.class		= &dra7xx_bb2d_hwmod_class,
+	.clkdm_name	= "dss_clkdm",
+	.main_clk	= "dpll_core_h24x2_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_DSS_BB2D_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_DSS_BB2D_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'counter' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_counter_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE,
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_counter_hwmod_class = {
+	.name	= "counter",
+	.sysc	= &dra7xx_counter_sysc,
+};
+
+/* counter_32k */
+static struct omap_hwmod dra7xx_counter_32k_hwmod = {
+	.name		= "counter_32k",
+	.class		= &dra7xx_counter_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.flags		= HWMOD_SWSUP_SIDLE,
+	.main_clk	= "wkupaon_iclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_COUNTER_32K_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_COUNTER_32K_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
+ * 'ctrl_module' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_ctrl_module_hwmod_class = {
+	.name	= "ctrl_module",
+};
+
+/* ctrl_module_wkup */
+static struct omap_hwmod dra7xx_ctrl_module_wkup_hwmod = {
+	.name		= "ctrl_module_wkup",
+	.class		= &dra7xx_ctrl_module_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.prcm = {
+		.omap4 = {
+			.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
+		},
+	},
+};
+
+/*
+ * 'dcan' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_dcan_hwmod_class = {
+	.name	= "dcan",
+};
+
+/* dcan1 */
+static struct omap_hwmod dra7xx_dcan1_hwmod = {
+	.name		= "dcan1",
+	.class		= &dra7xx_dcan_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.main_clk	= "dcan1_sys_clk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_DCAN1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_DCAN1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* dcan2 */
+static struct omap_hwmod dra7xx_dcan2_hwmod = {
+	.name		= "dcan2",
+	.class		= &dra7xx_dcan_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "sys_clkin1",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_DCAN2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_DCAN2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'dma' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_dma_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x002c,
+	.syss_offs	= 0x0028,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_dma_hwmod_class = {
+	.name	= "dma",
+	.sysc	= &dra7xx_dma_sysc,
+};
+
+/* dma dev_attr */
+static struct omap_dma_dev_attr dma_dev_attr = {
+	.dev_caps	= RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
+			  IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
+	.lch_count	= 32,
+};
+
+/* dma_system */
+static struct omap_hwmod_irq_info dra7xx_dma_system_irqs[] = {
+	{ .name = "0", .irq = 12 + DRA7XX_IRQ_GIC_START },
+	{ .name = "1", .irq = 13 + DRA7XX_IRQ_GIC_START },
+	{ .name = "2", .irq = 14 + DRA7XX_IRQ_GIC_START },
+	{ .name = "3", .irq = 15 + DRA7XX_IRQ_GIC_START },
+	{ .irq = -1 }
+};
+
+static struct omap_hwmod dra7xx_dma_system_hwmod = {
+	.name		= "dma_system",
+	.class		= &dra7xx_dma_hwmod_class,
+	.clkdm_name	= "dma_clkdm",
+	.mpu_irqs	= dra7xx_dma_system_irqs,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_DMA_DMA_SYSTEM_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_DMA_DMA_SYSTEM_CONTEXT_OFFSET,
+		},
+	},
+	.dev_attr	= &dma_dev_attr,
+};
+
+/*
+ * 'dss' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_dss_sysc = {
+	.rev_offs	= 0x0000,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class dra7xx_dss_hwmod_class = {
+	.name	= "dss",
+	.sysc	= &dra7xx_dss_sysc,
+	.reset	= omap_dss_reset,
+};
+
+/* dss */
+static struct omap_hwmod_dma_info dra7xx_dss_sdma_reqs[] = {
+	{ .dma_req = 75 + DRA7XX_DMA_REQ_START },
+	{ .dma_req = -1 }
+};
+
+static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+	{ .role = "dss_clk", .clk = "dss_dss_clk" },
+	{ .role = "hdmi_phy_clk", .clk = "dss_48mhz_clk" },
+	{ .role = "32khz_clk", .clk = "dss_32khz_clk" },
+	{ .role = "video2_clk", .clk = "dss_video2_clk" },
+	{ .role = "video1_clk", .clk = "dss_video1_clk" },
+	{ .role = "hdmi_clk", .clk = "dss_hdmi_clk" },
+};
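+
+/*
+ * Optional clocks are looked up by role name when the hwmod is set up.
+ * The HWMOD_CONTROL_OPT_CLKS_IN_RESET flag on the hwmod below keeps
+ * them enabled while the module is reset, since the DSS reset sequence
+ * needs its functional clocks running to complete.
+ */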
+
+static struct omap_hwmod dra7xx_dss_hwmod = {
+	.name		= "dss_core",
+	.class		= &dra7xx_dss_hwmod_class,
+	.clkdm_name	= "dss_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.sdma_reqs	= dra7xx_dss_sdma_reqs,
+	.main_clk	= "dss_dss_clk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_DSS_DSS_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= dss_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(dss_opt_clks),
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_dispc_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_dispc_hwmod_class = {
+	.name	= "dispc",
+	.sysc	= &dra7xx_dispc_sysc,
+};
+
+/* dss_dispc */
+/* dss_dispc dev_attr */
+static struct omap_dss_dispc_dev_attr dss_dispc_dev_attr = {
+	.has_framedonetv_irq	= 1,
+	.manager_count		= 4,
+};
+
+static struct omap_hwmod dra7xx_dss_dispc_hwmod = {
+	.name		= "dss_dispc",
+	.class		= &dra7xx_dispc_hwmod_class,
+	.clkdm_name	= "dss_clkdm",
+	.main_clk	= "dss_dss_clk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET,
+			.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
+		},
+	},
+	.dev_attr	= &dss_dispc_dev_attr,
+};
+
+/*
+ * 'hdmi' class
+ * hdmi controller
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_hdmi_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_hdmi_hwmod_class = {
+	.name	= "hdmi",
+	.sysc	= &dra7xx_hdmi_sysc,
+};
+
+/* dss_hdmi */
+
+static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = {
+	{ .role = "sys_clk", .clk = "dss_hdmi_clk" },
+};
+
+static struct omap_hwmod dra7xx_dss_hdmi_hwmod = {
+	.name		= "dss_hdmi",
+	.class		= &dra7xx_hdmi_hwmod_class,
+	.clkdm_name	= "dss_clkdm",
+	.main_clk	= "dss_48mhz_clk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET,
+			.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
+		},
+	},
+	.opt_clks	= dss_hdmi_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(dss_hdmi_opt_clks),
+};
+
+/*
+ * 'elm' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_elm_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_elm_hwmod_class = {
+	.name	= "elm",
+	.sysc	= &dra7xx_elm_sysc,
+};
+
+/* elm */
+
+static struct omap_hwmod dra7xx_elm_hwmod = {
+	.name		= "elm",
+	.class		= &dra7xx_elm_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_ELM_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_ELM_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
+ * 'gpio' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_gpio_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0114,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_gpio_hwmod_class = {
+	.name	= "gpio",
+	.sysc	= &dra7xx_gpio_sysc,
+	.rev	= 2,
+};
+
+/* gpio dev_attr */
+static struct omap_gpio_dev_attr gpio_dev_attr = {
+	.bank_width	= 32,
+	.dbck_flag	= true,
+};
+
+/* gpio1 */
+static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio1_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio1_hwmod = {
+	.name		= "gpio1",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.main_clk	= "wkupaon_iclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_GPIO1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_GPIO1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio2 */
+static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio2_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio2_hwmod = {
+	.name		= "gpio2",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio3 */
+static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio3_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio3_hwmod = {
+	.name		= "gpio3",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio3_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio4 */
+static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio4_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio4_hwmod = {
+	.name		= "gpio4",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio4_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio4_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio5 */
+static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio5_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio5_hwmod = {
+	.name		= "gpio5",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO5_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO5_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio5_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio5_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio6 */
+static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio6_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio6_hwmod = {
+	.name		= "gpio6",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO6_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO6_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio6_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio6_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio7 */
+static struct omap_hwmod_opt_clk gpio7_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio7_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio7_hwmod = {
+	.name		= "gpio7",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO7_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO7_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio7_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio7_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/* gpio8 */
+static struct omap_hwmod_opt_clk gpio8_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio8_dbclk" },
+};
+
+static struct omap_hwmod dra7xx_gpio8_hwmod = {
+	.name		= "gpio8",
+	.class		= &dra7xx_gpio_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_GPIO8_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_GPIO8_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= gpio8_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio8_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+};
+
+/*
+ * 'gpmc' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_gpmc_hwmod_class = {
+	.name	= "gpmc",
+	.sysc	= &dra7xx_gpmc_sysc,
+};
+
+/* gpmc */
+
+static struct omap_hwmod dra7xx_gpmc_hwmod = {
+	.name		= "gpmc",
+	.class		= &dra7xx_gpmc_hwmod_class,
+	.clkdm_name	= "l3main1_clkdm",
+	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3MAIN1_GPMC_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3MAIN1_GPMC_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/*
+ * 'hdq1w' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_hdq1w_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0014,
+	.syss_offs	= 0x0018,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_hdq1w_hwmod_class = {
+	.name	= "hdq1w",
+	.sysc	= &dra7xx_hdq1w_sysc,
+};
+
+/* hdq1w */
+
+static struct omap_hwmod dra7xx_hdq1w_hwmod = {
+	.name		= "hdq1w",
+	.class		= &dra7xx_hdq1w_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_INIT_NO_RESET,
+	.main_clk	= "func_12m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_HDQ1W_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_HDQ1W_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'i2c' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_i2c_sysc = {
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0090,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.clockact	= CLOCKACT_TEST_ICLK,
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_i2c_hwmod_class = {
+	.name	= "i2c",
+	.sysc	= &dra7xx_i2c_sysc,
+	.reset	= &omap_i2c_reset,
+	.rev	= OMAP_I2C_IP_VERSION_2,
+};
+
+/* i2c dev_attr */
+static struct omap_i2c_dev_attr i2c_dev_attr = {
+	.flags	= OMAP_I2C_FLAG_BUS_SHIFT_NONE,
+};
+
+/* i2c1 */
+static struct omap_hwmod dra7xx_i2c1_hwmod = {
+	.name		= "i2c1",
+	.class		= &dra7xx_i2c_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+	.main_clk	= "func_96m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_I2C1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_I2C1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &i2c_dev_attr,
+};
+
+/* i2c2 */
+static struct omap_hwmod dra7xx_i2c2_hwmod = {
+	.name		= "i2c2",
+	.class		= &dra7xx_i2c_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+	.main_clk	= "func_96m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_I2C2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_I2C2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &i2c_dev_attr,
+};
+
+/* i2c3 */
+static struct omap_hwmod dra7xx_i2c3_hwmod = {
+	.name		= "i2c3",
+	.class		= &dra7xx_i2c_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+	.main_clk	= "func_96m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_I2C3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_I2C3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &i2c_dev_attr,
+};
+
+/* i2c4 */
+static struct omap_hwmod dra7xx_i2c4_hwmod = {
+	.name		= "i2c4",
+	.class		= &dra7xx_i2c_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+	.main_clk	= "func_96m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_I2C4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_I2C4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &i2c_dev_attr,
+};
+
+/* i2c5 */
+static struct omap_hwmod dra7xx_i2c5_hwmod = {
+	.name		= "i2c5",
+	.class		= &dra7xx_i2c_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+	.main_clk	= "func_96m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_I2C5_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_I2C5_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &i2c_dev_attr,
+};
+
+/*
+ * 'mcspi' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_mcspi_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_mcspi_hwmod_class = {
+	.name	= "mcspi",
+	.sysc	= &dra7xx_mcspi_sysc,
+	.rev	= OMAP4_MCSPI_REV,
+};
+
+/* mcspi1 */
+/* mcspi1 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi1_dev_attr = {
+	.num_chipselect	= 4,
+};
+
+static struct omap_hwmod dra7xx_mcspi1_hwmod = {
+	.name		= "mcspi1",
+	.class		= &dra7xx_mcspi_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "func_48m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_MCSPI1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_MCSPI1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &mcspi1_dev_attr,
+};
+
+/* mcspi2 */
+/* mcspi2 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi2_dev_attr = {
+	.num_chipselect	= 2,
+};
+
+static struct omap_hwmod dra7xx_mcspi2_hwmod = {
+	.name		= "mcspi2",
+	.class		= &dra7xx_mcspi_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "func_48m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_MCSPI2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_MCSPI2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &mcspi2_dev_attr,
+};
+
+/* mcspi3 */
+/* mcspi3 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi3_dev_attr = {
+	.num_chipselect	= 2,
+};
+
+static struct omap_hwmod dra7xx_mcspi3_hwmod = {
+	.name		= "mcspi3",
+	.class		= &dra7xx_mcspi_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "func_48m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_MCSPI3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_MCSPI3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &mcspi3_dev_attr,
+};
+
+/* mcspi4 */
+/* mcspi4 dev_attr */
+static struct omap2_mcspi_dev_attr mcspi4_dev_attr = {
+	.num_chipselect	= 1,
+};
+
+static struct omap_hwmod dra7xx_mcspi4_hwmod = {
+	.name		= "mcspi4",
+	.class		= &dra7xx_mcspi_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "func_48m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_MCSPI4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_MCSPI4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &mcspi4_dev_attr,
+};
+
+/*
+ * 'mmc' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_mmc_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_mmc_hwmod_class = {
+	.name	= "mmc",
+	.sysc	= &dra7xx_mmc_sysc,
+};
+
+/* mmc1 */
+static struct omap_hwmod_opt_clk mmc1_opt_clks[] = {
+	{ .role = "clk32k", .clk = "mmc1_clk32k" },
+};
+
+/* mmc1 dev_attr */
+static struct omap_mmc_dev_attr mmc1_dev_attr = {
+	.flags	= OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+
+static struct omap_hwmod dra7xx_mmc1_hwmod = {
+	.name		= "mmc1",
+	.class		= &dra7xx_mmc_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "mmc1_fclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_MMC1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_MMC1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mmc1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mmc1_opt_clks),
+	.dev_attr	= &mmc1_dev_attr,
+};
+
+/* mmc2 */
+static struct omap_hwmod_opt_clk mmc2_opt_clks[] = {
+	{ .role = "clk32k", .clk = "mmc2_clk32k" },
+};
+
+static struct omap_hwmod dra7xx_mmc2_hwmod = {
+	.name		= "mmc2",
+	.class		= &dra7xx_mmc_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "mmc2_fclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_MMC2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_MMC2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mmc2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mmc2_opt_clks),
+};
+
+/* mmc3 */
+static struct omap_hwmod_opt_clk mmc3_opt_clks[] = {
+	{ .role = "clk32k", .clk = "mmc3_clk32k" },
+};
+
+static struct omap_hwmod dra7xx_mmc3_hwmod = {
+	.name		= "mmc3",
+	.class		= &dra7xx_mmc_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "mmc3_gfclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_MMC3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_MMC3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mmc3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mmc3_opt_clks),
+};
+
+/* mmc4 */
+static struct omap_hwmod_opt_clk mmc4_opt_clks[] = {
+	{ .role = "clk32k", .clk = "mmc4_clk32k" },
+};
+
+static struct omap_hwmod dra7xx_mmc4_hwmod = {
+	.name		= "mmc4",
+	.class		= &dra7xx_mmc_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "mmc4_gfclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_MMC4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_MMC4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mmc4_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mmc4_opt_clks),
+};
+
+/*
+ * 'mpu' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_mpu_hwmod_class = {
+	.name	= "mpu",
+};
+
+/* mpu */
+static struct omap_hwmod dra7xx_mpu_hwmod = {
+	.name		= "mpu",
+	.class		= &dra7xx_mpu_hwmod_class,
+	.clkdm_name	= "mpu_clkdm",
+	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+	.main_clk	= "dpll_mpu_m2_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_MPU_MPU_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_MPU_MPU_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
+ * 'ocp2scp' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_ocp2scp_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_ocp2scp_hwmod_class = {
+	.name	= "ocp2scp",
+	.sysc	= &dra7xx_ocp2scp_sysc,
+};
+
+/* ocp2scp1 */
+static struct omap_hwmod dra7xx_ocp2scp1_hwmod = {
+	.name		= "ocp2scp1",
+	.class		= &dra7xx_ocp2scp_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "l4_root_clk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_OCP2SCP1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_OCP2SCP1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/*
+ * 'qspi' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_qspi_sysc = {
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE,
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_qspi_hwmod_class = {
+	.name	= "qspi",
+	.sysc	= &dra7xx_qspi_sysc,
+};
+
+/* qspi */
+static struct omap_hwmod dra7xx_qspi_hwmod = {
+	.name		= "qspi",
+	.class		= &dra7xx_qspi_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "qspi_gfclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_QSPI_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_QSPI_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'sata' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_sata_sysc = {
+	.sysc_offs	= 0x0000,
+	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
+	.name	= "sata",
+	.sysc	= &dra7xx_sata_sysc,
+};
+
+/* sata */
+static struct omap_hwmod_opt_clk sata_opt_clks[] = {
+	{ .role = "ref_clk", .clk = "sata_ref_clk" },
+};
+
+static struct omap_hwmod dra7xx_sata_hwmod = {
+	.name		= "sata",
+	.class		= &dra7xx_sata_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+	.main_clk	= "func_48m_fclk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= sata_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(sata_opt_clks),
+};
+
+/*
+ * 'smartreflex' class
+ *
+ */
+
+/* The IP is not compliant with the type1 / type2 scheme */
+static struct omap_hwmod_sysc_fields omap_hwmod_sysc_type_smartreflex = {
+	.sidle_shift	= 24,
+	.enwkup_shift	= 26,
+};
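+
+/*
+ * With the shifts above, the hwmod core programs the idle mode into
+ * SYSCONFIG bits 25:24 and the wakeup enable into bit 26, rather than
+ * into the standard type1/type2 bit positions.
+ */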
+
+static struct omap_hwmod_class_sysconfig dra7xx_smartreflex_sysc = {
+	.sysc_offs	= 0x0038,
+	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type_smartreflex,
+};
+
+static struct omap_hwmod_class dra7xx_smartreflex_hwmod_class = {
+	.name	= "smartreflex",
+	.sysc	= &dra7xx_smartreflex_sysc,
+	.rev	= 2,
+};
+
+/* smartreflex_core */
+/* smartreflex_core dev_attr */
+static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = {
+	.sensor_voltdm_name	= "core",
+};
+
+static struct omap_hwmod dra7xx_smartreflex_core_hwmod = {
+	.name		= "smartreflex_core",
+	.class		= &dra7xx_smartreflex_hwmod_class,
+	.clkdm_name	= "coreaon_clkdm",
+	.main_clk	= "wkupaon_iclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_COREAON_SMARTREFLEX_CORE_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_COREAON_SMARTREFLEX_CORE_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &smartreflex_core_dev_attr,
+};
+
+/* smartreflex_mpu */
+/* smartreflex_mpu dev_attr */
+static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = {
+	.sensor_voltdm_name	= "mpu",
+};
+
+static struct omap_hwmod dra7xx_smartreflex_mpu_hwmod = {
+	.name		= "smartreflex_mpu",
+	.class		= &dra7xx_smartreflex_hwmod_class,
+	.clkdm_name	= "coreaon_clkdm",
+	.main_clk	= "wkupaon_iclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_COREAON_SMARTREFLEX_MPU_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_COREAON_SMARTREFLEX_MPU_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.dev_attr	= &smartreflex_mpu_dev_attr,
+};
+
+/*
+ * 'spinlock' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_spinlock_hwmod_class = {
+	.name	= "spinlock",
+	.sysc	= &dra7xx_spinlock_sysc,
+};
+
+/* spinlock */
+static struct omap_hwmod dra7xx_spinlock_hwmod = {
+	.name		= "spinlock",
+	.class		= &dra7xx_spinlock_hwmod_class,
+	.clkdm_name	= "l4cfg_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4CFG_SPINLOCK_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4CFG_SPINLOCK_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
+ * 'timer' class
+ *
+ * This class contains several variants: ['timer_1ms', 'timer_secure',
+ * 'timer']
+ */
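+
+/*
+ * Note: the three sysconfig records below are currently identical; one
+ * record is kept per variant.
+ */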
+
+static struct omap_hwmod_class_sysconfig dra7xx_timer_1ms_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_timer_1ms_hwmod_class = {
+	.name	= "timer",
+	.sysc	= &dra7xx_timer_1ms_sysc,
+};
+
+static struct omap_hwmod_class_sysconfig dra7xx_timer_secure_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_timer_secure_hwmod_class = {
+	.name	= "timer",
+	.sysc	= &dra7xx_timer_secure_sysc,
+};
+
+static struct omap_hwmod_class_sysconfig dra7xx_timer_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class dra7xx_timer_hwmod_class = {
+	.name	= "timer",
+	.sysc	= &dra7xx_timer_sysc,
+};
+
+/* timer1 */
+static struct omap_hwmod dra7xx_timer1_hwmod = {
+	.name		= "timer1",
+	.class		= &dra7xx_timer_1ms_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.main_clk	= "timer1_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_TIMER1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_TIMER1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer2 */
+static struct omap_hwmod dra7xx_timer2_hwmod = {
+	.name		= "timer2",
+	.class		= &dra7xx_timer_1ms_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "timer2_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_TIMER2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer3 */
+static struct omap_hwmod dra7xx_timer3_hwmod = {
+	.name		= "timer3",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "timer3_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_TIMER3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer4 */
+static struct omap_hwmod dra7xx_timer4_hwmod = {
+	.name		= "timer4",
+	.class		= &dra7xx_timer_secure_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "timer4_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_TIMER4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer5 */
+static struct omap_hwmod dra7xx_timer5_hwmod = {
+	.name		= "timer5",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.main_clk	= "timer5_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_TIMER5_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_TIMER5_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer6 */
+static struct omap_hwmod dra7xx_timer6_hwmod = {
+	.name		= "timer6",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.main_clk	= "timer6_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_TIMER6_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_TIMER6_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer7 */
+static struct omap_hwmod dra7xx_timer7_hwmod = {
+	.name		= "timer7",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.main_clk	= "timer7_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_TIMER7_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_TIMER7_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer8 */
+static struct omap_hwmod dra7xx_timer8_hwmod = {
+	.name		= "timer8",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.main_clk	= "timer8_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_TIMER8_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_TIMER8_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer9 */
+static struct omap_hwmod dra7xx_timer9_hwmod = {
+	.name		= "timer9",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "timer9_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER9_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_TIMER9_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer10 */
+static struct omap_hwmod dra7xx_timer10_hwmod = {
+	.name		= "timer10",
+	.class		= &dra7xx_timer_1ms_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "timer10_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER10_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_TIMER10_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* timer11 */
+static struct omap_hwmod dra7xx_timer11_hwmod = {
+	.name		= "timer11",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "timer11_gfclk_mux",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER11_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_TIMER11_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'uart' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_uart_sysc = {
+	.rev_offs	= 0x0050,
+	.sysc_offs	= 0x0054,
+	.syss_offs	= 0x0058,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_uart_hwmod_class = {
+	.name	= "uart",
+	.sysc	= &dra7xx_uart_sysc,
+};
+
+/* uart1 */
+static struct omap_hwmod dra7xx_uart1_hwmod = {
+	.name		= "uart1",
+	.class		= &dra7xx_uart_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "uart1_gfclk_mux",
+	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_UART1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* uart2 */
+static struct omap_hwmod dra7xx_uart2_hwmod = {
+	.name		= "uart2",
+	.class		= &dra7xx_uart_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "uart2_gfclk_mux",
+	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_UART2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_UART2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* uart3 */
+static struct omap_hwmod dra7xx_uart3_hwmod = {
+	.name		= "uart3",
+	.class		= &dra7xx_uart_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "uart3_gfclk_mux",
+	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_UART3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* uart4 */
+static struct omap_hwmod dra7xx_uart4_hwmod = {
+	.name		= "uart4",
+	.class		= &dra7xx_uart_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "uart4_gfclk_mux",
+	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_UART4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_UART4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* uart5 */
+static struct omap_hwmod dra7xx_uart5_hwmod = {
+	.name		= "uart5",
+	.class		= &dra7xx_uart_hwmod_class,
+	.clkdm_name	= "l4per_clkdm",
+	.main_clk	= "uart5_gfclk_mux",
+	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER_UART5_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER_UART5_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/* uart6 */
+static struct omap_hwmod dra7xx_uart6_hwmod = {
+	.name		= "uart6",
+	.class		= &dra7xx_uart_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.main_clk	= "uart6_gfclk_mux",
+	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_UART6_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_UART6_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * 'usb_otg_ss' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = {
+	.name	= "usb_otg_ss",
+};
+
+/* usb_otg_ss1 */
+static struct omap_hwmod_opt_clk usb_otg_ss1_opt_clks[] = {
+	{ .role = "refclk960m", .clk = "usb_otg_ss1_refclk960m" },
+};
+
+static struct omap_hwmod dra7xx_usb_otg_ss1_hwmod = {
+	.name		= "usb_otg_ss1",
+	.class		= &dra7xx_usb_otg_ss_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "dpll_core_h13x2_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_USB_OTG_SS1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_USB_OTG_SS1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= usb_otg_ss1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(usb_otg_ss1_opt_clks),
+};
+
+/* usb_otg_ss2 */
+static struct omap_hwmod_opt_clk usb_otg_ss2_opt_clks[] = {
+	{ .role = "refclk960m", .clk = "usb_otg_ss2_refclk960m" },
+};
+
+static struct omap_hwmod dra7xx_usb_otg_ss2_hwmod = {
+	.name		= "usb_otg_ss2",
+	.class		= &dra7xx_usb_otg_ss_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "dpll_core_h13x2_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_USB_OTG_SS2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_USB_OTG_SS2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+	.opt_clks	= usb_otg_ss2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(usb_otg_ss2_opt_clks),
+};
+
+/* usb_otg_ss3 */
+static struct omap_hwmod dra7xx_usb_otg_ss3_hwmod = {
+	.name		= "usb_otg_ss3",
+	.class		= &dra7xx_usb_otg_ss_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "dpll_core_h13x2_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_USB_OTG_SS3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_USB_OTG_SS3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/* usb_otg_ss4 */
+static struct omap_hwmod dra7xx_usb_otg_ss4_hwmod = {
+	.name		= "usb_otg_ss4",
+	.class		= &dra7xx_usb_otg_ss_hwmod_class,
+	.clkdm_name	= "l3init_clkdm",
+	.main_clk	= "dpll_core_h13x2_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3INIT_USB_OTG_SS4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3INIT_USB_OTG_SS4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/*
+ * 'vcp' class
+ *
+ */
+
+static struct omap_hwmod_class dra7xx_vcp_hwmod_class = {
+	.name	= "vcp",
+};
+
+/* vcp1 */
+static struct omap_hwmod dra7xx_vcp1_hwmod = {
+	.name		= "vcp1",
+	.class		= &dra7xx_vcp_hwmod_class,
+	.clkdm_name	= "l3main1_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3MAIN1_VCP1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3MAIN1_VCP1_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/* vcp2 */
+static struct omap_hwmod dra7xx_vcp2_hwmod = {
+	.name		= "vcp2",
+	.class		= &dra7xx_vcp_hwmod_class,
+	.clkdm_name	= "l3main1_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L3MAIN1_VCP2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L3MAIN1_VCP2_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/*
+ * 'wd_timer' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_wd_timer_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_wd_timer_hwmod_class = {
+	.name		= "wd_timer",
+	.sysc		= &dra7xx_wd_timer_sysc,
+	.pre_shutdown	= &omap2_wd_timer_disable,
+	.reset		= &omap2_wd_timer_reset,
+};
+
+/* wd_timer2 */
+static struct omap_hwmod dra7xx_wd_timer2_hwmod = {
+	.name		= "wd_timer2",
+	.class		= &dra7xx_wd_timer_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.main_clk	= "sys_32k_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_WD_TIMER2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_WD_TIMER2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+/*
+ * Interfaces
+ */
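+
+/*
+ * Each omap_hwmod_ocp_if describes one interconnect link: the
+ * initiating (master) and target (slave) hwmods, the interface clock,
+ * and which initiators (MPU and/or SDMA) may use the link.
+ */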
+
+/* l3_main_2 -> l3_instr */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_2__l3_instr = {
+	.master		= &dra7xx_l3_main_2_hwmod,
+	.slave		= &dra7xx_l3_instr_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> l3_main_1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__l3_main_1 = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_l3_main_1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mpu -> l3_main_1 */
+static struct omap_hwmod_ocp_if dra7xx_mpu__l3_main_1 = {
+	.master		= &dra7xx_mpu_hwmod,
+	.slave		= &dra7xx_l3_main_1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU,
+};
+
+/* l3_main_1 -> l3_main_2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l3_main_2 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_l3_main_2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU,
+};
+
+/* l4_cfg -> l3_main_2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__l3_main_2 = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_l3_main_2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> l4_cfg */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_cfg = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_l4_cfg_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> l4_per1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per1 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_l4_per1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> l4_per2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per2 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_l4_per2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> l4_per3 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per3 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_l4_per3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> l4_wkup */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_wkup = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_l4_wkup_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> atl */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__atl = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_atl_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> bb2d */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__bb2d = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_bb2d_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> counter_32k */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__counter_32k = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_counter_32k_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> ctrl_module_wkup */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_ctrl_module_wkup_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> dcan1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__dcan1 = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_dcan1_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> dcan2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__dcan2 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_dcan2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_dma_system_addrs[] = {
+	{
+		.pa_start	= 0x4a056000,
+		.pa_end		= 0x4a056fff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
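+
+/*
+ * The empty { } entry terminates an address space array; the hwmod
+ * core counts entries up to it.
+ */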
+
+/* l4_cfg -> dma_system */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__dma_system = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_dma_system_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_dma_system_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_dss_addrs[] = {
+	{
+		.name		= "family",
+		.pa_start	= 0x58000000,
+		.pa_end		= 0x5800007f,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l3_main_1 -> dss */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dss = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_dss_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_dss_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_dss_dispc_addrs[] = {
+	{
+		.name		= "dispc",
+		.pa_start	= 0x58001000,
+		.pa_end		= 0x58001fff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l3_main_1 -> dispc */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dispc = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_dss_dispc_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_dss_dispc_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_dss_hdmi_addrs[] = {
+	{
+		.name		= "hdmi_wp",
+		.pa_start	= 0x58040000,
+		.pa_end		= 0x580400ff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l3_main_1 -> hdmi */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_dss_hdmi_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_dss_hdmi_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_elm_addrs[] = {
+	{
+		.pa_start	= 0x48078000,
+		.pa_end		= 0x48078fff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_per1 -> elm */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_elm_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_elm_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__gpio1 = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_gpio1_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio2 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio3 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio4 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio4_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio5 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio5 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio5_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio6 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio6 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio6_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio7 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio7 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio7_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> gpio8 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio8 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_gpio8_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_gpmc_addrs[] = {
+	{
+		.pa_start	= 0x50000000,
+		.pa_end		= 0x500003ff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l3_main_1 -> gpmc */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__gpmc = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_gpmc_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_gpmc_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_hdq1w_addrs[] = {
+	{
+		.pa_start	= 0x480b2000,
+		.pa_end		= 0x480b201f,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_per1 -> hdq1w */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__hdq1w = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_hdq1w_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_hdq1w_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> i2c1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c1 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_i2c1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> i2c2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c2 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_i2c2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> i2c3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c3 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_i2c3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> i2c4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c4 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_i2c4_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> i2c5 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c5 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_i2c5_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mcspi1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi1 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mcspi1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mcspi2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi2 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mcspi2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mcspi3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi3 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mcspi3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mcspi4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi4 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mcspi4_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mmc1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc1 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mmc1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mmc2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc2 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mmc2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mmc3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc3 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mmc3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> mmc4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc4 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_mmc4_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> mpu */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mpu = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_mpu_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_ocp2scp1_addrs[] = {
+	{
+		.pa_start	= 0x4a080000,
+		.pa_end		= 0x4a08001f,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> ocp2scp1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp1 = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_ocp2scp1_hwmod,
+	.clk		= "l4_root_clk_div",
+	.addr		= dra7xx_ocp2scp1_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_qspi_addrs[] = {
+	{
+		.pa_start	= 0x4b300000,
+		.pa_end		= 0x4b30007f,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l3_main_1 -> qspi */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__qspi = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_qspi_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_qspi_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_sata_addrs[] = {
+	{
+		.name		= "sysc",
+		.pa_start	= 0x4a141100,
+		.pa_end		= 0x4a141107,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> sata */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__sata = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_sata_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_sata_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_smartreflex_core_addrs[] = {
+	{
+		.pa_start	= 0x4a0dd000,
+		.pa_end		= 0x4a0dd07f,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> smartreflex_core */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__smartreflex_core = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_smartreflex_core_hwmod,
+	.clk		= "l4_root_clk_div",
+	.addr		= dra7xx_smartreflex_core_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_smartreflex_mpu_addrs[] = {
+	{
+		.pa_start	= 0x4a0d9000,
+		.pa_end		= 0x4a0d907f,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> smartreflex_mpu */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__smartreflex_mpu = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_smartreflex_mpu_hwmod,
+	.clk		= "l4_root_clk_div",
+	.addr		= dra7xx_smartreflex_mpu_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space dra7xx_spinlock_addrs[] = {
+	{
+		.pa_start	= 0x4a0f6000,
+		.pa_end		= 0x4a0f6fff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> spinlock */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__spinlock = {
+	.master		= &dra7xx_l4_cfg_hwmod,
+	.slave		= &dra7xx_spinlock_hwmod,
+	.clk		= "l3_iclk_div",
+	.addr		= dra7xx_spinlock_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> timer1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__timer1 = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_timer1_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> timer2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer2 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_timer2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> timer3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer3 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_timer3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> timer4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer4 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_timer4_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer5 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer5 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_timer5_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer6 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer6 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_timer6_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer7 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer7 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_timer7_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer8 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer8 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_timer8_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> timer9 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer9 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_timer9_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> timer10 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer10 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_timer10_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> timer11 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer11 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_timer11_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> uart1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart1 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_uart1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> uart2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart2 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_uart2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> uart3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart3 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_uart3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> uart4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart4 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_uart4_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> uart5 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart5 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_uart5_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per1 -> uart6 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart6 = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_uart6_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> usb_otg_ss1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss1 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_usb_otg_ss1_hwmod,
+	.clk		= "dpll_core_h13x2_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> usb_otg_ss2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss2 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_usb_otg_ss2_hwmod,
+	.clk		= "dpll_core_h13x2_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> usb_otg_ss3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss3 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_usb_otg_ss3_hwmod,
+	.clk		= "dpll_core_h13x2_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> usb_otg_ss4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss4 = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra7xx_usb_otg_ss4_hwmod,
+	.clk		= "dpll_core_h13x2_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> vcp1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__vcp1 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_vcp1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> vcp1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp1 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_vcp1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> vcp2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__vcp2 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_vcp2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> vcp2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp2 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_vcp2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> wd_timer2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__wd_timer2 = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_wd_timer2_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
+	&dra7xx_l3_main_2__l3_instr,
+	&dra7xx_l4_cfg__l3_main_1,
+	&dra7xx_mpu__l3_main_1,
+	&dra7xx_l3_main_1__l3_main_2,
+	&dra7xx_l4_cfg__l3_main_2,
+	&dra7xx_l3_main_1__l4_cfg,
+	&dra7xx_l3_main_1__l4_per1,
+	&dra7xx_l3_main_1__l4_per2,
+	&dra7xx_l3_main_1__l4_per3,
+	&dra7xx_l3_main_1__l4_wkup,
+	&dra7xx_l4_per2__atl,
+	&dra7xx_l3_main_1__bb2d,
+	&dra7xx_l4_wkup__counter_32k,
+	&dra7xx_l4_wkup__ctrl_module_wkup,
+	&dra7xx_l4_wkup__dcan1,
+	&dra7xx_l4_per2__dcan2,
+	&dra7xx_l4_cfg__dma_system,
+	&dra7xx_l3_main_1__dss,
+	&dra7xx_l3_main_1__dispc,
+	&dra7xx_l3_main_1__hdmi,
+	&dra7xx_l4_per1__elm,
+	&dra7xx_l4_wkup__gpio1,
+	&dra7xx_l4_per1__gpio2,
+	&dra7xx_l4_per1__gpio3,
+	&dra7xx_l4_per1__gpio4,
+	&dra7xx_l4_per1__gpio5,
+	&dra7xx_l4_per1__gpio6,
+	&dra7xx_l4_per1__gpio7,
+	&dra7xx_l4_per1__gpio8,
+	&dra7xx_l3_main_1__gpmc,
+	&dra7xx_l4_per1__hdq1w,
+	&dra7xx_l4_per1__i2c1,
+	&dra7xx_l4_per1__i2c2,
+	&dra7xx_l4_per1__i2c3,
+	&dra7xx_l4_per1__i2c4,
+	&dra7xx_l4_per1__i2c5,
+	&dra7xx_l4_per1__mcspi1,
+	&dra7xx_l4_per1__mcspi2,
+	&dra7xx_l4_per1__mcspi3,
+	&dra7xx_l4_per1__mcspi4,
+	&dra7xx_l4_per1__mmc1,
+	&dra7xx_l4_per1__mmc2,
+	&dra7xx_l4_per1__mmc3,
+	&dra7xx_l4_per1__mmc4,
+	&dra7xx_l4_cfg__mpu,
+	&dra7xx_l4_cfg__ocp2scp1,
+	&dra7xx_l3_main_1__qspi,
+	&dra7xx_l4_cfg__sata,
+	&dra7xx_l4_cfg__smartreflex_core,
+	&dra7xx_l4_cfg__smartreflex_mpu,
+	&dra7xx_l4_cfg__spinlock,
+	&dra7xx_l4_wkup__timer1,
+	&dra7xx_l4_per1__timer2,
+	&dra7xx_l4_per1__timer3,
+	&dra7xx_l4_per1__timer4,
+	&dra7xx_l4_per3__timer5,
+	&dra7xx_l4_per3__timer6,
+	&dra7xx_l4_per3__timer7,
+	&dra7xx_l4_per3__timer8,
+	&dra7xx_l4_per1__timer9,
+	&dra7xx_l4_per1__timer10,
+	&dra7xx_l4_per1__timer11,
+	&dra7xx_l4_per1__uart1,
+	&dra7xx_l4_per1__uart2,
+	&dra7xx_l4_per1__uart3,
+	&dra7xx_l4_per1__uart4,
+	&dra7xx_l4_per1__uart5,
+	&dra7xx_l4_per1__uart6,
+	&dra7xx_l4_per3__usb_otg_ss1,
+	&dra7xx_l4_per3__usb_otg_ss2,
+	&dra7xx_l4_per3__usb_otg_ss3,
+	&dra7xx_l4_per3__usb_otg_ss4,
+	&dra7xx_l3_main_1__vcp1,
+	&dra7xx_l4_per2__vcp1,
+	&dra7xx_l3_main_1__vcp2,
+	&dra7xx_l4_per2__vcp2,
+	&dra7xx_l4_wkup__wd_timer2,
+	NULL,
+};
+
+int __init dra7xx_hwmod_init(void)
+{
+	omap_hwmod_init();
+	return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
+}
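Note: this file only registers the DRA7xx interconnect data; the hook-up into
the SoC early-init path lands in io.c separately. As a hedged sketch of the
call order (the function name dra7xx_init_early() and the clockdomain call are
assumptions from the broader series, not shown in this hunk; the ordering
mirrors other OMAP4-family SoCs, where power domains are registered before
clock domains and hwmods):

	void __init dra7xx_init_early(void)
	{
		/* ... iomap, PRM/CM and clock setup elided ... */
		dra7xx_powerdomains_init();	/* power domains first */
		dra7xx_clockdomains_init();	/* then clock domains (assumed name) */
		dra7xx_hwmod_init();		/* finally the hwmod links above */
	}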
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
index e4d7bd6..baf3d8b 100644
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -256,6 +256,7 @@
 extern void am33xx_powerdomains_init(void);
 extern void omap44xx_powerdomains_init(void);
 extern void omap54xx_powerdomains_init(void);
+extern void dra7xx_powerdomains_init(void);
 
 extern struct pwrdm_ops omap2_pwrdm_operations;
 extern struct pwrdm_ops omap3_pwrdm_operations;
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index e2d4bd8..328c103 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -336,6 +336,13 @@
 	.voltdm		  = { .name = "core" },
 };
 
+static struct powerdomain alwon_81xx_pwrdm = {
+	.name		  = "alwon_pwrdm",
+	.prcm_offs	  = TI81XX_PRM_ALWON_MOD,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.voltdm		  = { .name = "core" },
+};
+
 static struct powerdomain device_81xx_pwrdm = {
 	.name		  = "device_pwrdm",
 	.prcm_offs	  = TI81XX_PRM_DEVICE_MOD,
@@ -442,6 +449,7 @@
 };
 
 static struct powerdomain *powerdomains_ti81xx[] __initdata = {
+	&alwon_81xx_pwrdm,
 	&device_81xx_pwrdm,
 	&active_816x_pwrdm,
 	&default_816x_pwrdm,
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c
new file mode 100644
index 0000000..48151d1
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains7xx_data.c
@@ -0,0 +1,454 @@
+/*
+ * DRA7xx Power domains framework
+ *
+ * Copyright (C) 2009-2013 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Nokia Corporation
+ *
+ * Generated by code originally written by:
+ * Abhijit Pagare (abhijitpagare@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ * Paul Walmsley (paul@pwsan.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "powerdomain.h"
+
+#include "prcm-common.h"
+#include "prcm44xx.h"
+#include "prm7xx.h"
+#include "prcm_mpu7xx.h"
+
+/* iva_7xx_pwrdm: IVA-HD power domain */
+static struct powerdomain iva_7xx_pwrdm = {
+	.name		  = "iva_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_IVA_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF,
+	.banks		  = 4,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* hwa_mem */
+		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
+		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
+		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* hwa_mem */
+		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
+		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
+		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* rtc_7xx_pwrdm: RTC power domain */
+static struct powerdomain rtc_7xx_pwrdm = {
+	.name		  = "rtc_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_RTC_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_ON,
+};
+
+/* custefuse_7xx_pwrdm: Customer efuse controller power domain */
+static struct powerdomain custefuse_7xx_pwrdm = {
+	.name		  = "custefuse_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_CUSTEFUSE_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* ipu_7xx_pwrdm: IPU power domain */
+static struct powerdomain ipu_7xx_pwrdm = {
+	.name		  = "ipu_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_IPU_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF,
+	.banks		  = 2,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* aessmem */
+		[1] = PWRSTS_OFF_RET,	/* periphmem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* aessmem */
+		[1] = PWRSTS_OFF_RET,	/* periphmem */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* dss_7xx_pwrdm: Display subsystem power domain */
+static struct powerdomain dss_7xx_pwrdm = {
+	.name		  = "dss_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_DSS_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* dss_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* dss_mem */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* l4per_7xx_pwrdm: Target peripherals power domain */
+static struct powerdomain l4per_7xx_pwrdm = {
+	.name		  = "l4per_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_L4PER_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 2,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* nonretained_bank */
+		[1] = PWRSTS_OFF_RET,	/* retained_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* nonretained_bank */
+		[1] = PWRSTS_OFF_RET,	/* retained_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* gpu_7xx_pwrdm: 3D accelerator power domain */
+static struct powerdomain gpu_7xx_pwrdm = {
+	.name		  = "gpu_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_GPU_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* wkupaon_7xx_pwrdm: Wake-up power domain */
+static struct powerdomain wkupaon_7xx_pwrdm = {
+	.name		  = "wkupaon_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_WKUPAON_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_ON,	/* wkup_bank */
+	},
+};
+
+/* core_7xx_pwrdm: CORE power domain */
+static struct powerdomain core_7xx_pwrdm = {
+	.name		  = "core_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_CORE_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 5,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* core_nret_bank */
+		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
+		[2] = PWRSTS_OFF_RET,	/* core_other_bank */
+		[3] = PWRSTS_OFF_RET,	/* ipu_l2ram */
+		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* core_nret_bank */
+		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
+		[2] = PWRSTS_OFF_RET,	/* core_other_bank */
+		[3] = PWRSTS_OFF_RET,	/* ipu_l2ram */
+		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* coreaon_7xx_pwrdm: Always ON logic that sits in VDD_CORE voltage domain */
+static struct powerdomain coreaon_7xx_pwrdm = {
+	.name		  = "coreaon_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_COREAON_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_ON,
+};
+
+/* cpu0_7xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
+static struct powerdomain cpu0_7xx_pwrdm = {
+	.name		  = "cpu0_pwrdm",
+	.prcm_offs	  = DRA7XX_MPU_PRCM_PRM_C0_INST,
+	.prcm_partition	  = DRA7XX_MPU_PRCM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* cpu0_l1 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_ON,	/* cpu0_l1 */
+	},
+};
+
+/* cpu1_7xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
+static struct powerdomain cpu1_7xx_pwrdm = {
+	.name		  = "cpu1_pwrdm",
+	.prcm_offs	  = DRA7XX_MPU_PRCM_PRM_C1_INST,
+	.prcm_partition	  = DRA7XX_MPU_PRCM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* cpu1_l1 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_ON,	/* cpu1_l1 */
+	},
+};
+
+/* vpe_7xx_pwrdm: VPE (Video Processing Engine) power domain */
+static struct powerdomain vpe_7xx_pwrdm = {
+	.name		  = "vpe_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_VPE_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* vpe_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* vpe_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* mpu_7xx_pwrdm: Modena processor and the Neon coprocessor power domain */
+static struct powerdomain mpu_7xx_pwrdm = {
+	.name		  = "mpu_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_MPU_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 2,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* mpu_l2 */
+		[1] = PWRSTS_RET,	/* mpu_ram */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* mpu_l2 */
+		[1] = PWRSTS_OFF_RET,	/* mpu_ram */
+	},
+};
+
+/* l3init_7xx_pwrdm: L3 initiator peripherals power domain */
+static struct powerdomain l3init_7xx_pwrdm = {
+	.name		  = "l3init_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_L3INIT_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 3,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* gmac_bank */
+		[1] = PWRSTS_OFF_RET,	/* l3init_bank1 */
+		[2] = PWRSTS_OFF_RET,	/* l3init_bank2 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* gmac_bank */
+		[1] = PWRSTS_OFF_RET,	/* l3init_bank1 */
+		[2] = PWRSTS_OFF_RET,	/* l3init_bank2 */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* eve3_7xx_pwrdm: EVE3 (Embedded Vision Engine) power domain */
+static struct powerdomain eve3_7xx_pwrdm = {
+	.name		  = "eve3_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_EVE3_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* eve3_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* eve3_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* emu_7xx_pwrdm: Emulation power domain */
+static struct powerdomain emu_7xx_pwrdm = {
+	.name		  = "emu_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_EMU_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* emu_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* emu_bank */
+	},
+};
+
+/* dsp2_7xx_pwrdm: DSP2 power domain */
+static struct powerdomain dsp2_7xx_pwrdm = {
+	.name		  = "dsp2_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_DSP2_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 3,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* dsp2_edma */
+		[1] = PWRSTS_OFF_RET,	/* dsp2_l1 */
+		[2] = PWRSTS_OFF_RET,	/* dsp2_l2 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* dsp2_edma */
+		[1] = PWRSTS_OFF_RET,	/* dsp2_l1 */
+		[2] = PWRSTS_OFF_RET,	/* dsp2_l2 */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* dsp1_7xx_pwrdm: DSP1 power domain */
+static struct powerdomain dsp1_7xx_pwrdm = {
+	.name		  = "dsp1_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_DSP1_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 3,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* dsp1_edma */
+		[1] = PWRSTS_OFF_RET,	/* dsp1_l1 */
+		[2] = PWRSTS_OFF_RET,	/* dsp1_l2 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* dsp1_edma */
+		[1] = PWRSTS_OFF_RET,	/* dsp1_l1 */
+		[2] = PWRSTS_OFF_RET,	/* dsp1_l2 */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* cam_7xx_pwrdm: Camera subsystem power domain */
+static struct powerdomain cam_7xx_pwrdm = {
+	.name		  = "cam_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_CAM_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* vip_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* vip_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* eve4_7xx_pwrdm: EVE4 power domain */
+static struct powerdomain eve4_7xx_pwrdm = {
+	.name		  = "eve4_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_EVE4_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* eve4_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* eve4_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* eve2_7xx_pwrdm: EVE2 power domain */
+static struct powerdomain eve2_7xx_pwrdm = {
+	.name		  = "eve2_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_EVE2_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* eve2_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* eve2_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* eve1_7xx_pwrdm: EVE1 power domain */
+static struct powerdomain eve1_7xx_pwrdm = {
+	.name		  = "eve1_pwrdm",
+	.prcm_offs	  = DRA7XX_PRM_EVE1_INST,
+	.prcm_partition	  = DRA7XX_PRM_PARTITION,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* eve1_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRSTS_OFF_RET,	/* eve1_bank */
+	},
+	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/*
+ * The following power domains are not under SW control
+ *
+ * mpuaon
+ * mmaon
+ */
+
+/* As powerdomains are added or removed above, this list must also be changed */
+static struct powerdomain *powerdomains_dra7xx[] __initdata = {
+	&iva_7xx_pwrdm,
+	&rtc_7xx_pwrdm,
+	&custefuse_7xx_pwrdm,
+	&ipu_7xx_pwrdm,
+	&dss_7xx_pwrdm,
+	&l4per_7xx_pwrdm,
+	&gpu_7xx_pwrdm,
+	&wkupaon_7xx_pwrdm,
+	&core_7xx_pwrdm,
+	&coreaon_7xx_pwrdm,
+	&cpu0_7xx_pwrdm,
+	&cpu1_7xx_pwrdm,
+	&vpe_7xx_pwrdm,
+	&mpu_7xx_pwrdm,
+	&l3init_7xx_pwrdm,
+	&eve3_7xx_pwrdm,
+	&emu_7xx_pwrdm,
+	&dsp2_7xx_pwrdm,
+	&dsp1_7xx_pwrdm,
+	&cam_7xx_pwrdm,
+	&eve4_7xx_pwrdm,
+	&eve2_7xx_pwrdm,
+	&eve1_7xx_pwrdm,
+	NULL
+};
+
+void __init dra7xx_powerdomains_init(void)
+{
+	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
+	pwrdm_register_pwrdms(powerdomains_dra7xx);
+	pwrdm_complete_init();
+}
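For context on how these tables are consumed at runtime: the pwrsts and
pwrsts_logic_ret masks bound what PM code may program for each domain. A
minimal sketch, using the existing pwrdm_lookup()/pwrdm_set_next_pwrst()
helpers from mach-omap2/powerdomain.c (the choice of dss_pwrdm is illustrative
only):

	struct powerdomain *pwrdm;

	pwrdm = pwrdm_lookup("dss_pwrdm");	/* defined in the table above */
	if (pwrdm)
		/* allowed: dss_7xx_pwrdm advertises PWRSTS_OFF_RET_ON */
		pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_RET);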
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index ff1ac4a..0e841fd 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -58,6 +58,7 @@
 #define TI816X_PRM_IVAHD1_MOD			0x0d00
 #define TI816X_PRM_IVAHD2_MOD			0x0e00
 #define TI816X_PRM_SGX_MOD				0x0f00
+#define TI81XX_PRM_ALWON_MOD			0x1800
 
 /* 24XX register bits shared between CM & PRM registers */
 
diff --git a/arch/arm/mach-omap2/prcm44xx.h b/arch/arm/mach-omap2/prcm44xx.h
index f429cdd..4fea2cf 100644
--- a/arch/arm/mach-omap2/prcm44xx.h
+++ b/arch/arm/mach-omap2/prcm44xx.h
@@ -38,6 +38,11 @@
 #define OMAP54XX_SCRM_PARTITION			4
 #define OMAP54XX_PRCM_MPU_PARTITION		5
 
+#define DRA7XX_PRM_PARTITION                   1
+#define DRA7XX_CM_CORE_AON_PARTITION           2
+#define DRA7XX_CM_CORE_PARTITION               3
+#define DRA7XX_MPU_PRCM_PARTITION              5
+
 /*
  * OMAP4_MAX_PRCM_PARTITIONS: set to the highest value of the PRCM partition
  * IDs, plus one
diff --git a/arch/arm/mach-omap2/prcm_mpu7xx.h b/arch/arm/mach-omap2/prcm_mpu7xx.h
new file mode 100644
index 0000000..9ebb5ce
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm_mpu7xx.h
@@ -0,0 +1,78 @@
+/*
+ * DRA7xx PRCM MPU instance offset macros
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Generated by code originally written by:
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRCM_MPU7XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRCM_MPU7XX_H
+
+#include "prcm_mpu_44xx_54xx.h"
+
+#define DRA7XX_PRCM_MPU_BASE			0x48243000
+
+#define DRA7XX_PRCM_MPU_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(DRA7XX_PRCM_MPU_BASE + (inst) + (reg))
+
+/* MPU_PRCM instances */
+#define DRA7XX_MPU_PRCM_OCP_SOCKET_INST	0x0000
+#define DRA7XX_MPU_PRCM_DEVICE_INST	0x0200
+#define DRA7XX_MPU_PRCM_PRM_C0_INST	0x0400
+#define DRA7XX_MPU_PRCM_CM_C0_INST	0x0600
+#define DRA7XX_MPU_PRCM_PRM_C1_INST	0x0800
+#define DRA7XX_MPU_PRCM_CM_C1_INST	0x0a00
+
+/* PRCM_MPU clockdomain register offsets (from instance start) */
+#define DRA7XX_MPU_PRCM_CM_C0_CPU0_CDOFFS	0x0000
+#define DRA7XX_MPU_PRCM_CM_C1_CPU1_CDOFFS	0x0000
+
+
+/* MPU_PRCM */
+
+/* MPU_PRCM.PRCM_MPU_OCP_SOCKET register offsets */
+#define DRA7XX_REVISION_PRCM_MPU_OFFSET				0x0000
+
+/* MPU_PRCM.PRCM_MPU_DEVICE register offsets */
+#define DRA7XX_PRM_FRAC_INCREMENTER_NUMERATOR_OFFSET		0x0010
+#define DRA7XX_PRM_FRAC_INCREMENTER_DENUMERATOR_RELOAD_OFFSET	0x0014
+
+/* MPU_PRCM.PRCM_MPU_PRM_C0 register offsets */
+#define DRA7XX_PM_CPU0_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_CPU0_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_CPU0_CPU0_RSTCTRL_OFFSET			0x0010
+#define DRA7XX_RM_CPU0_CPU0_RSTST_OFFSET			0x0014
+#define DRA7XX_RM_CPU0_CPU0_CONTEXT_OFFSET			0x0024
+
+/* MPU_PRCM.PRCM_MPU_CM_C0 register offsets */
+#define DRA7XX_CM_CPU0_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_CPU0_CPU0_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_CPU0_CPU0_CLKCTRL				DRA7XX_PRCM_MPU_REGADDR(DRA7XX_MPU_PRCM_CM_C0_INST, 0x0020)
+
+/* MPU_PRCM.PRCM_MPU_PRM_C1 register offsets */
+#define DRA7XX_PM_CPU1_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_CPU1_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_CPU1_CPU1_RSTCTRL_OFFSET			0x0010
+#define DRA7XX_RM_CPU1_CPU1_RSTST_OFFSET			0x0014
+#define DRA7XX_RM_CPU1_CPU1_CONTEXT_OFFSET			0x0024
+
+/* MPU_PRCM.PRCM_MPU_CM_C1 register offsets */
+#define DRA7XX_CM_CPU1_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_CPU1_CPU1_CLKCTRL_OFFSET			0x0020
+#define DRA7XX_CM_CPU1_CPU1_CLKCTRL				DRA7XX_PRCM_MPU_REGADDR(DRA7XX_MPU_PRCM_CM_C1_INST, 0x0020)
+
+#endif
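A worked example of the address arithmetic these macros encode (values taken
directly from the defines above; OMAP2_L4_IO_ADDRESS() then translates the
physical address into the kernel's static L4 mapping):

	DRA7XX_CM_CPU0_CPU0_CLKCTRL
		= OMAP2_L4_IO_ADDRESS(0x48243000 + 0x0600 + 0x0020)
		= OMAP2_L4_IO_ADDRESS(0x48243620)

The PRM macros in prm7xx.h below follow the same base + instance + register
pattern from DRA7XX_PRM_BASE (0x4ae06000).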
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 415c7e0..03a6034 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -620,6 +620,15 @@
 	return 0;
 }
 
+static int omap4_check_vcvp(void)
+{
+	/* No VC/VP on dra7xx devices */
+	if (soc_is_dra7xx())
+		return 0;
+
+	return 1;
+}
+
 struct pwrdm_ops omap4_pwrdm_operations = {
 	.pwrdm_set_next_pwrst	= omap4_pwrdm_set_next_pwrst,
 	.pwrdm_read_next_pwrst	= omap4_pwrdm_read_next_pwrst,
@@ -637,6 +646,7 @@
 	.pwrdm_set_mem_onst	= omap4_pwrdm_set_mem_onst,
 	.pwrdm_set_mem_retst	= omap4_pwrdm_set_mem_retst,
 	.pwrdm_wait_transition	= omap4_pwrdm_wait_transition,
+	.pwrdm_has_voltdm	= omap4_check_vcvp,
 };
 
 /*
@@ -650,7 +660,7 @@
 
 int __init omap44xx_prm_init(void)
 {
-	if (!cpu_is_omap44xx() && !soc_is_omap54xx())
+	if (!cpu_is_omap44xx() && !soc_is_omap54xx() && !soc_is_dra7xx())
 		return 0;
 
 	return prm_register(&omap44xx_prm_ll_data);
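The new pwrdm_has_voltdm hook lets the powerdomain core ask whether this SoC
family supports voltage controller/processor association at all. A hedged
sketch of a caller, assuming the core keeps the registered ops in its
arch_pwrdm pointer as mach-omap2/powerdomain.c does (the exact in-tree guard
may differ):

	/* illustrative only, not the in-tree core code */
	if (arch_pwrdm && arch_pwrdm->pwrdm_has_voltdm &&
	    !arch_pwrdm->pwrdm_has_voltdm())
		return 0;	/* skip voltage-domain association on DRA7xx */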
diff --git a/arch/arm/mach-omap2/prm7xx.h b/arch/arm/mach-omap2/prm7xx.h
new file mode 100644
index 0000000..d92a840
--- /dev/null
+++ b/arch/arm/mach-omap2/prm7xx.h
@@ -0,0 +1,678 @@
+/*
+ * DRA7xx PRM instance offset macros
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Generated by code originally written by:
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM7XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM7XX_H
+
+#include "prm44xx_54xx.h"
+#include "prcm-common.h"
+#include "prm.h"
+
+#define DRA7XX_PRM_BASE		0x4ae06000
+
+#define DRA7XX_PRM_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(DRA7XX_PRM_BASE + (inst) + (reg))
+
+
+/* PRM instances */
+#define DRA7XX_PRM_OCP_SOCKET_INST	0x0000
+#define DRA7XX_PRM_CKGEN_INST		0x0100
+#define DRA7XX_PRM_MPU_INST		0x0300
+#define DRA7XX_PRM_DSP1_INST		0x0400
+#define DRA7XX_PRM_IPU_INST		0x0500
+#define DRA7XX_PRM_COREAON_INST		0x0628
+#define DRA7XX_PRM_CORE_INST		0x0700
+#define DRA7XX_PRM_IVA_INST		0x0f00
+#define DRA7XX_PRM_CAM_INST		0x1000
+#define DRA7XX_PRM_DSS_INST		0x1100
+#define DRA7XX_PRM_GPU_INST		0x1200
+#define DRA7XX_PRM_L3INIT_INST		0x1300
+#define DRA7XX_PRM_L4PER_INST		0x1400
+#define DRA7XX_PRM_CUSTEFUSE_INST	0x1600
+#define DRA7XX_PRM_WKUPAON_INST		0x1724
+#define DRA7XX_PRM_WKUPAON_CM_INST	0x1800
+#define DRA7XX_PRM_EMU_INST		0x1900
+#define DRA7XX_PRM_EMU_CM_INST		0x1a00
+#define DRA7XX_PRM_DSP2_INST		0x1b00
+#define DRA7XX_PRM_EVE1_INST		0x1b40
+#define DRA7XX_PRM_EVE2_INST		0x1b80
+#define DRA7XX_PRM_EVE3_INST		0x1bc0
+#define DRA7XX_PRM_EVE4_INST		0x1c00
+#define DRA7XX_PRM_RTC_INST		0x1c60
+#define DRA7XX_PRM_VPE_INST		0x1c80
+#define DRA7XX_PRM_DEVICE_INST		0x1d00
+#define DRA7XX_PRM_INSTR_INST		0x1f00
+
+/* PRM clockdomain register offsets (from instance start) */
+#define DRA7XX_PRM_WKUPAON_CM_WKUPAON_CDOFFS	0x0000
+#define DRA7XX_PRM_EMU_CM_EMU_CDOFFS		0x0000
+
+/* PRM */
+
+/* PRM.OCP_SOCKET_PRM register offsets */
+#define DRA7XX_REVISION_PRM_OFFSET				0x0000
+#define DRA7XX_PRM_IRQSTATUS_MPU_OFFSET				0x0010
+#define DRA7XX_PRM_IRQSTATUS_MPU_2_OFFSET			0x0014
+#define DRA7XX_PRM_IRQENABLE_MPU_OFFSET				0x0018
+#define DRA7XX_PRM_IRQENABLE_MPU_2_OFFSET			0x001c
+#define DRA7XX_PRM_IRQSTATUS_IPU2_OFFSET			0x0020
+#define DRA7XX_PRM_IRQENABLE_IPU2_OFFSET			0x0028
+#define DRA7XX_PRM_IRQSTATUS_DSP1_OFFSET			0x0030
+#define DRA7XX_PRM_IRQENABLE_DSP1_OFFSET			0x0038
+#define DRA7XX_CM_PRM_PROFILING_CLKCTRL_OFFSET			0x0040
+#define DRA7XX_CM_PRM_PROFILING_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_OCP_SOCKET_INST, 0x0040)
+#define DRA7XX_PRM_IRQENABLE_DSP2_OFFSET			0x0044
+#define DRA7XX_PRM_IRQENABLE_EVE1_OFFSET			0x0048
+#define DRA7XX_PRM_IRQENABLE_EVE2_OFFSET			0x004c
+#define DRA7XX_PRM_IRQENABLE_EVE3_OFFSET			0x0050
+#define DRA7XX_PRM_IRQENABLE_EVE4_OFFSET			0x0054
+#define DRA7XX_PRM_IRQENABLE_IPU1_OFFSET			0x0058
+#define DRA7XX_PRM_IRQSTATUS_DSP2_OFFSET			0x005c
+#define DRA7XX_PRM_IRQSTATUS_EVE1_OFFSET			0x0060
+#define DRA7XX_PRM_IRQSTATUS_EVE2_OFFSET			0x0064
+#define DRA7XX_PRM_IRQSTATUS_EVE3_OFFSET			0x0068
+#define DRA7XX_PRM_IRQSTATUS_EVE4_OFFSET			0x006c
+#define DRA7XX_PRM_IRQSTATUS_IPU1_OFFSET			0x0070
+#define DRA7XX_PRM_DEBUG_CFG1_OFFSET				0x00e4
+#define DRA7XX_PRM_DEBUG_CFG2_OFFSET				0x00e8
+#define DRA7XX_PRM_DEBUG_CFG3_OFFSET				0x00ec
+#define DRA7XX_PRM_DEBUG_OUT_OFFSET				0x00f4
+
+/* PRM.CKGEN_PRM register offsets */
+#define DRA7XX_CM_CLKSEL_SYSCLK1_OFFSET				0x0000
+#define DRA7XX_CM_CLKSEL_SYSCLK1				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0000)
+#define DRA7XX_CM_CLKSEL_WKUPAON_OFFSET				0x0008
+#define DRA7XX_CM_CLKSEL_WKUPAON				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0008)
+#define DRA7XX_CM_CLKSEL_ABE_PLL_REF_OFFSET			0x000c
+#define DRA7XX_CM_CLKSEL_ABE_PLL_REF				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x000c)
+#define DRA7XX_CM_CLKSEL_SYS_OFFSET				0x0010
+#define DRA7XX_CM_CLKSEL_SYS					DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0010)
+#define DRA7XX_CM_CLKSEL_ABE_PLL_BYPAS_OFFSET			0x0014
+#define DRA7XX_CM_CLKSEL_ABE_PLL_BYPAS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0014)
+#define DRA7XX_CM_CLKSEL_ABE_PLL_SYS_OFFSET			0x0018
+#define DRA7XX_CM_CLKSEL_ABE_PLL_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0018)
+#define DRA7XX_CM_CLKSEL_ABE_24M_OFFSET				0x001c
+#define DRA7XX_CM_CLKSEL_ABE_24M				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x001c)
+#define DRA7XX_CM_CLKSEL_ABE_SYS_OFFSET				0x0020
+#define DRA7XX_CM_CLKSEL_ABE_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0020)
+#define DRA7XX_CM_CLKSEL_HDMI_MCASP_AUX_OFFSET			0x0024
+#define DRA7XX_CM_CLKSEL_HDMI_MCASP_AUX				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0024)
+#define DRA7XX_CM_CLKSEL_HDMI_TIMER_OFFSET			0x0028
+#define DRA7XX_CM_CLKSEL_HDMI_TIMER				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0028)
+#define DRA7XX_CM_CLKSEL_MCASP_SYS_OFFSET			0x002c
+#define DRA7XX_CM_CLKSEL_MCASP_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x002c)
+#define DRA7XX_CM_CLKSEL_MLBP_MCASP_OFFSET			0x0030
+#define DRA7XX_CM_CLKSEL_MLBP_MCASP				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0030)
+#define DRA7XX_CM_CLKSEL_MLB_MCASP_OFFSET			0x0034
+#define DRA7XX_CM_CLKSEL_MLB_MCASP				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0034)
+#define DRA7XX_CM_CLKSEL_PER_ABE_X1_GFCLK_MCASP_AUX_OFFSET	0x0038
+#define DRA7XX_CM_CLKSEL_PER_ABE_X1_GFCLK_MCASP_AUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0038)
+#define DRA7XX_CM_CLKSEL_SYS_CLK1_32K_OFFSET			0x0040
+#define DRA7XX_CM_CLKSEL_SYS_CLK1_32K				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0040)
+#define DRA7XX_CM_CLKSEL_TIMER_SYS_OFFSET			0x0044
+#define DRA7XX_CM_CLKSEL_TIMER_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0044)
+#define DRA7XX_CM_CLKSEL_VIDEO1_MCASP_AUX_OFFSET		0x0048
+#define DRA7XX_CM_CLKSEL_VIDEO1_MCASP_AUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0048)
+#define DRA7XX_CM_CLKSEL_VIDEO1_TIMER_OFFSET			0x004c
+#define DRA7XX_CM_CLKSEL_VIDEO1_TIMER				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x004c)
+#define DRA7XX_CM_CLKSEL_VIDEO2_MCASP_AUX_OFFSET		0x0050
+#define DRA7XX_CM_CLKSEL_VIDEO2_MCASP_AUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0050)
+#define DRA7XX_CM_CLKSEL_VIDEO2_TIMER_OFFSET			0x0054
+#define DRA7XX_CM_CLKSEL_VIDEO2_TIMER				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0054)
+#define DRA7XX_CM_CLKSEL_CLKOUTMUX0_OFFSET			0x0058
+#define DRA7XX_CM_CLKSEL_CLKOUTMUX0				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0058)
+#define DRA7XX_CM_CLKSEL_CLKOUTMUX1_OFFSET			0x005c
+#define DRA7XX_CM_CLKSEL_CLKOUTMUX1				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x005c)
+#define DRA7XX_CM_CLKSEL_CLKOUTMUX2_OFFSET			0x0060
+#define DRA7XX_CM_CLKSEL_CLKOUTMUX2				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0060)
+#define DRA7XX_CM_CLKSEL_HDMI_PLL_SYS_OFFSET			0x0064
+#define DRA7XX_CM_CLKSEL_HDMI_PLL_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0064)
+#define DRA7XX_CM_CLKSEL_VIDEO1_PLL_SYS_OFFSET			0x0068
+#define DRA7XX_CM_CLKSEL_VIDEO1_PLL_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0068)
+#define DRA7XX_CM_CLKSEL_VIDEO2_PLL_SYS_OFFSET			0x006c
+#define DRA7XX_CM_CLKSEL_VIDEO2_PLL_SYS				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x006c)
+#define DRA7XX_CM_CLKSEL_ABE_CLK_DIV_OFFSET			0x0070
+#define DRA7XX_CM_CLKSEL_ABE_CLK_DIV				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0070)
+#define DRA7XX_CM_CLKSEL_ABE_GICLK_DIV_OFFSET			0x0074
+#define DRA7XX_CM_CLKSEL_ABE_GICLK_DIV				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0074)
+#define DRA7XX_CM_CLKSEL_AESS_FCLK_DIV_OFFSET			0x0078
+#define DRA7XX_CM_CLKSEL_AESS_FCLK_DIV				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0078)
+#define DRA7XX_CM_CLKSEL_EVE_CLK_OFFSET				0x0080
+#define DRA7XX_CM_CLKSEL_EVE_CLK				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0080)
+#define DRA7XX_CM_CLKSEL_USB_OTG_CLK_CLKOUTMUX_OFFSET		0x0084
+#define DRA7XX_CM_CLKSEL_USB_OTG_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0084)
+#define DRA7XX_CM_CLKSEL_CORE_DPLL_OUT_CLK_CLKOUTMUX_OFFSET	0x0088
+#define DRA7XX_CM_CLKSEL_CORE_DPLL_OUT_CLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0088)
+#define DRA7XX_CM_CLKSEL_DSP_GFCLK_CLKOUTMUX_OFFSET		0x008c
+#define DRA7XX_CM_CLKSEL_DSP_GFCLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x008c)
+#define DRA7XX_CM_CLKSEL_EMIF_PHY_GCLK_CLKOUTMUX_OFFSET		0x0090
+#define DRA7XX_CM_CLKSEL_EMIF_PHY_GCLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0090)
+#define DRA7XX_CM_CLKSEL_EMU_CLK_CLKOUTMUX_OFFSET		0x0094
+#define DRA7XX_CM_CLKSEL_EMU_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0094)
+#define DRA7XX_CM_CLKSEL_FUNC_96M_AON_CLK_CLKOUTMUX_OFFSET	0x0098
+#define DRA7XX_CM_CLKSEL_FUNC_96M_AON_CLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x0098)
+#define DRA7XX_CM_CLKSEL_GMAC_250M_CLK_CLKOUTMUX_OFFSET		0x009c
+#define DRA7XX_CM_CLKSEL_GMAC_250M_CLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x009c)
+#define DRA7XX_CM_CLKSEL_GPU_GCLK_CLKOUTMUX_OFFSET		0x00a0
+#define DRA7XX_CM_CLKSEL_GPU_GCLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00a0)
+#define DRA7XX_CM_CLKSEL_HDMI_CLK_CLKOUTMUX_OFFSET		0x00a4
+#define DRA7XX_CM_CLKSEL_HDMI_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00a4)
+#define DRA7XX_CM_CLKSEL_IVA_GCLK_CLKOUTMUX_OFFSET		0x00a8
+#define DRA7XX_CM_CLKSEL_IVA_GCLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00a8)
+#define DRA7XX_CM_CLKSEL_L3INIT_480M_GFCLK_CLKOUTMUX_OFFSET	0x00ac
+#define DRA7XX_CM_CLKSEL_L3INIT_480M_GFCLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00ac)
+#define DRA7XX_CM_CLKSEL_MPU_GCLK_CLKOUTMUX_OFFSET		0x00b0
+#define DRA7XX_CM_CLKSEL_MPU_GCLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00b0)
+#define DRA7XX_CM_CLKSEL_PCIE1_CLK_CLKOUTMUX_OFFSET		0x00b4
+#define DRA7XX_CM_CLKSEL_PCIE1_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00b4)
+#define DRA7XX_CM_CLKSEL_PCIE2_CLK_CLKOUTMUX_OFFSET		0x00b8
+#define DRA7XX_CM_CLKSEL_PCIE2_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00b8)
+#define DRA7XX_CM_CLKSEL_PER_ABE_X1_CLK_CLKOUTMUX_OFFSET	0x00bc
+#define DRA7XX_CM_CLKSEL_PER_ABE_X1_CLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00bc)
+#define DRA7XX_CM_CLKSEL_SATA_CLK_CLKOUTMUX_OFFSET		0x00c0
+#define DRA7XX_CM_CLKSEL_SATA_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00c0)
+#define DRA7XX_CM_CLKSEL_SECURE_32K_CLK_CLKOUTMUX_OFFSET	0x00c4
+#define DRA7XX_CM_CLKSEL_SECURE_32K_CLK_CLKOUTMUX		DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00c4)
+#define DRA7XX_CM_CLKSEL_SYS_CLK1_CLKOUTMUX_OFFSET		0x00c8
+#define DRA7XX_CM_CLKSEL_SYS_CLK1_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00c8)
+#define DRA7XX_CM_CLKSEL_SYS_CLK2_CLKOUTMUX_OFFSET		0x00cc
+#define DRA7XX_CM_CLKSEL_SYS_CLK2_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00cc)
+#define DRA7XX_CM_CLKSEL_VIDEO1_CLK_CLKOUTMUX_OFFSET		0x00d0
+#define DRA7XX_CM_CLKSEL_VIDEO1_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00d0)
+#define DRA7XX_CM_CLKSEL_VIDEO2_CLK_CLKOUTMUX_OFFSET		0x00d4
+#define DRA7XX_CM_CLKSEL_VIDEO2_CLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00d4)
+#define DRA7XX_CM_CLKSEL_ABE_LP_CLK_OFFSET			0x00d8
+#define DRA7XX_CM_CLKSEL_ABE_LP_CLK				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00d8)
+#define DRA7XX_CM_CLKSEL_ADC_GFCLK_OFFSET			0x00dc
+#define DRA7XX_CM_CLKSEL_ADC_GFCLK				DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00dc)
+#define DRA7XX_CM_CLKSEL_EVE_GFCLK_CLKOUTMUX_OFFSET		0x00e0
+#define DRA7XX_CM_CLKSEL_EVE_GFCLK_CLKOUTMUX			DRA7XX_PRM_REGADDR(DRA7XX_PRM_CKGEN_INST, 0x00e0)
+
+/* PRM.MPU_PRM register offsets */
+#define DRA7XX_PM_MPU_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_MPU_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_MPU_MPU_CONTEXT_OFFSET			0x0024
+
+/* PRM.DSP1_PRM register offsets */
+#define DRA7XX_PM_DSP1_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_DSP1_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_DSP1_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_DSP1_RSTST_OFFSET				0x0014
+#define DRA7XX_RM_DSP1_DSP1_CONTEXT_OFFSET			0x0024
+
+/* PRM.IPU_PRM register offsets */
+#define DRA7XX_PM_IPU_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_IPU_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_IPU1_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_IPU1_RSTST_OFFSET				0x0014
+#define DRA7XX_RM_IPU1_IPU1_CONTEXT_OFFSET			0x0024
+#define DRA7XX_PM_IPU_MCASP1_WKDEP_OFFSET			0x0050
+#define DRA7XX_RM_IPU_MCASP1_CONTEXT_OFFSET			0x0054
+#define DRA7XX_PM_IPU_TIMER5_WKDEP_OFFSET			0x0058
+#define DRA7XX_RM_IPU_TIMER5_CONTEXT_OFFSET			0x005c
+#define DRA7XX_PM_IPU_TIMER6_WKDEP_OFFSET			0x0060
+#define DRA7XX_RM_IPU_TIMER6_CONTEXT_OFFSET			0x0064
+#define DRA7XX_PM_IPU_TIMER7_WKDEP_OFFSET			0x0068
+#define DRA7XX_RM_IPU_TIMER7_CONTEXT_OFFSET			0x006c
+#define DRA7XX_PM_IPU_TIMER8_WKDEP_OFFSET			0x0070
+#define DRA7XX_RM_IPU_TIMER8_CONTEXT_OFFSET			0x0074
+#define DRA7XX_PM_IPU_I2C5_WKDEP_OFFSET				0x0078
+#define DRA7XX_RM_IPU_I2C5_CONTEXT_OFFSET			0x007c
+#define DRA7XX_PM_IPU_UART6_WKDEP_OFFSET			0x0080
+#define DRA7XX_RM_IPU_UART6_CONTEXT_OFFSET			0x0084
+
+/* PRM.COREAON_PRM register offsets */
+#define DRA7XX_PM_COREAON_SMARTREFLEX_MPU_WKDEP_OFFSET		0x0000
+#define DRA7XX_RM_COREAON_SMARTREFLEX_MPU_CONTEXT_OFFSET	0x0004
+#define DRA7XX_PM_COREAON_SMARTREFLEX_CORE_WKDEP_OFFSET		0x0010
+#define DRA7XX_RM_COREAON_SMARTREFLEX_CORE_CONTEXT_OFFSET	0x0014
+#define DRA7XX_PM_COREAON_SMARTREFLEX_GPU_WKDEP_OFFSET		0x0030
+#define DRA7XX_RM_COREAON_SMARTREFLEX_GPU_CONTEXT_OFFSET	0x0034
+#define DRA7XX_PM_COREAON_SMARTREFLEX_DSPEVE_WKDEP_OFFSET	0x0040
+#define DRA7XX_RM_COREAON_SMARTREFLEX_DSPEVE_CONTEXT_OFFSET	0x0044
+#define DRA7XX_PM_COREAON_SMARTREFLEX_IVAHD_WKDEP_OFFSET	0x0050
+#define DRA7XX_RM_COREAON_SMARTREFLEX_IVAHD_CONTEXT_OFFSET	0x0054
+#define DRA7XX_RM_COREAON_DUMMY_MODULE1_CONTEXT_OFFSET		0x0084
+#define DRA7XX_RM_COREAON_DUMMY_MODULE2_CONTEXT_OFFSET		0x0094
+#define DRA7XX_RM_COREAON_DUMMY_MODULE3_CONTEXT_OFFSET		0x00a4
+#define DRA7XX_RM_COREAON_DUMMY_MODULE4_CONTEXT_OFFSET		0x00b4
+
+/* PRM.CORE_PRM register offsets */
+#define DRA7XX_PM_CORE_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_CORE_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_L3MAIN1_L3_MAIN_1_CONTEXT_OFFSET		0x0024
+#define DRA7XX_RM_L3MAIN1_GPMC_CONTEXT_OFFSET			0x002c
+#define DRA7XX_RM_L3MAIN1_MMU_EDMA_CONTEXT_OFFSET		0x0034
+#define DRA7XX_PM_L3MAIN1_OCMC_RAM1_WKDEP_OFFSET		0x0050
+#define DRA7XX_RM_L3MAIN1_OCMC_RAM1_CONTEXT_OFFSET		0x0054
+#define DRA7XX_PM_L3MAIN1_OCMC_RAM2_WKDEP_OFFSET		0x0058
+#define DRA7XX_RM_L3MAIN1_OCMC_RAM2_CONTEXT_OFFSET		0x005c
+#define DRA7XX_PM_L3MAIN1_OCMC_RAM3_WKDEP_OFFSET		0x0060
+#define DRA7XX_RM_L3MAIN1_OCMC_RAM3_CONTEXT_OFFSET		0x0064
+#define DRA7XX_RM_L3MAIN1_OCMC_ROM_CONTEXT_OFFSET		0x006c
+#define DRA7XX_PM_L3MAIN1_TPCC_WKDEP_OFFSET			0x0070
+#define DRA7XX_RM_L3MAIN1_TPCC_CONTEXT_OFFSET			0x0074
+#define DRA7XX_PM_L3MAIN1_TPTC1_WKDEP_OFFSET			0x0078
+#define DRA7XX_RM_L3MAIN1_TPTC1_CONTEXT_OFFSET			0x007c
+#define DRA7XX_PM_L3MAIN1_TPTC2_WKDEP_OFFSET			0x0080
+#define DRA7XX_RM_L3MAIN1_TPTC2_CONTEXT_OFFSET			0x0084
+#define DRA7XX_RM_L3MAIN1_VCP1_CONTEXT_OFFSET			0x008c
+#define DRA7XX_RM_L3MAIN1_VCP2_CONTEXT_OFFSET			0x0094
+#define DRA7XX_RM_L3MAIN1_SPARE_CME_CONTEXT_OFFSET		0x009c
+#define DRA7XX_RM_L3MAIN1_SPARE_HDMI_CONTEXT_OFFSET		0x00a4
+#define DRA7XX_RM_L3MAIN1_SPARE_ICM_CONTEXT_OFFSET		0x00ac
+#define DRA7XX_RM_L3MAIN1_SPARE_IVA2_CONTEXT_OFFSET		0x00b4
+#define DRA7XX_RM_L3MAIN1_SPARE_SATA2_CONTEXT_OFFSET		0x00bc
+#define DRA7XX_RM_L3MAIN1_SPARE_UNKNOWN4_CONTEXT_OFFSET		0x00c4
+#define DRA7XX_RM_L3MAIN1_SPARE_UNKNOWN5_CONTEXT_OFFSET		0x00cc
+#define DRA7XX_RM_L3MAIN1_SPARE_UNKNOWN6_CONTEXT_OFFSET		0x00d4
+#define DRA7XX_RM_L3MAIN1_SPARE_VIDEOPLL1_CONTEXT_OFFSET	0x00dc
+#define DRA7XX_RM_L3MAIN1_SPARE_VIDEOPLL2_CONTEXT_OFFSET	0x00f4
+#define DRA7XX_RM_L3MAIN1_SPARE_VIDEOPLL3_CONTEXT_OFFSET	0x00fc
+#define DRA7XX_RM_IPU2_RSTCTRL_OFFSET				0x0210
+#define DRA7XX_RM_IPU2_RSTST_OFFSET				0x0214
+#define DRA7XX_RM_IPU2_IPU2_CONTEXT_OFFSET			0x0224
+#define DRA7XX_RM_DMA_DMA_SYSTEM_CONTEXT_OFFSET			0x0324
+#define DRA7XX_RM_EMIF_DMM_CONTEXT_OFFSET			0x0424
+#define DRA7XX_RM_EMIF_EMIF_OCP_FW_CONTEXT_OFFSET		0x042c
+#define DRA7XX_RM_EMIF_EMIF1_CONTEXT_OFFSET			0x0434
+#define DRA7XX_RM_EMIF_EMIF2_CONTEXT_OFFSET			0x043c
+#define DRA7XX_RM_EMIF_EMIF_DLL_CONTEXT_OFFSET			0x0444
+#define DRA7XX_RM_ATL_ATL_CONTEXT_OFFSET			0x0524
+#define DRA7XX_RM_L4CFG_L4_CFG_CONTEXT_OFFSET			0x0624
+#define DRA7XX_RM_L4CFG_SPINLOCK_CONTEXT_OFFSET			0x062c
+#define DRA7XX_RM_L4CFG_MAILBOX1_CONTEXT_OFFSET			0x0634
+#define DRA7XX_RM_L4CFG_SAR_ROM_CONTEXT_OFFSET			0x063c
+#define DRA7XX_RM_L4CFG_OCP2SCP2_CONTEXT_OFFSET			0x0644
+#define DRA7XX_RM_L4CFG_MAILBOX2_CONTEXT_OFFSET			0x064c
+#define DRA7XX_RM_L4CFG_MAILBOX3_CONTEXT_OFFSET			0x0654
+#define DRA7XX_RM_L4CFG_MAILBOX4_CONTEXT_OFFSET			0x065c
+#define DRA7XX_RM_L4CFG_MAILBOX5_CONTEXT_OFFSET			0x0664
+#define DRA7XX_RM_L4CFG_MAILBOX6_CONTEXT_OFFSET			0x066c
+#define DRA7XX_RM_L4CFG_MAILBOX7_CONTEXT_OFFSET			0x0674
+#define DRA7XX_RM_L4CFG_MAILBOX8_CONTEXT_OFFSET			0x067c
+#define DRA7XX_RM_L4CFG_MAILBOX9_CONTEXT_OFFSET			0x0684
+#define DRA7XX_RM_L4CFG_MAILBOX10_CONTEXT_OFFSET		0x068c
+#define DRA7XX_RM_L4CFG_MAILBOX11_CONTEXT_OFFSET		0x0694
+#define DRA7XX_RM_L4CFG_MAILBOX12_CONTEXT_OFFSET		0x069c
+#define DRA7XX_RM_L4CFG_MAILBOX13_CONTEXT_OFFSET		0x06a4
+#define DRA7XX_RM_L4CFG_SPARE_SMARTREFLEX_RTC_CONTEXT_OFFSET	0x06ac
+#define DRA7XX_RM_L4CFG_SPARE_SMARTREFLEX_SDRAM_CONTEXT_OFFSET	0x06b4
+#define DRA7XX_RM_L4CFG_SPARE_SMARTREFLEX_WKUP_CONTEXT_OFFSET	0x06bc
+#define DRA7XX_RM_L4CFG_IO_DELAY_BLOCK_CONTEXT_OFFSET		0x06c4
+#define DRA7XX_RM_L3INSTR_L3_MAIN_2_CONTEXT_OFFSET		0x0724
+#define DRA7XX_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET		0x072c
+#define DRA7XX_RM_L3INSTR_OCP_WP_NOC_CONTEXT_OFFSET		0x0744
+
+/* PRM.IVA_PRM register offsets */
+#define DRA7XX_PM_IVA_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_IVA_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_IVA_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_IVA_RSTST_OFFSET				0x0014
+#define DRA7XX_RM_IVA_IVA_CONTEXT_OFFSET			0x0024
+#define DRA7XX_RM_IVA_SL2_CONTEXT_OFFSET			0x002c
+
+/* PRM.CAM_PRM register offsets */
+#define DRA7XX_PM_CAM_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_CAM_PWRSTST_OFFSET				0x0004
+#define DRA7XX_PM_CAM_VIP1_WKDEP_OFFSET				0x0020
+#define DRA7XX_RM_CAM_VIP1_CONTEXT_OFFSET			0x0024
+#define DRA7XX_PM_CAM_VIP2_WKDEP_OFFSET				0x0028
+#define DRA7XX_RM_CAM_VIP2_CONTEXT_OFFSET			0x002c
+#define DRA7XX_PM_CAM_VIP3_WKDEP_OFFSET				0x0030
+#define DRA7XX_RM_CAM_VIP3_CONTEXT_OFFSET			0x0034
+#define DRA7XX_RM_CAM_LVDSRX_CONTEXT_OFFSET			0x003c
+#define DRA7XX_RM_CAM_CSI1_CONTEXT_OFFSET			0x0044
+#define DRA7XX_RM_CAM_CSI2_CONTEXT_OFFSET			0x004c
+
+/* PRM.DSS_PRM register offsets */
+#define DRA7XX_PM_DSS_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_DSS_PWRSTST_OFFSET				0x0004
+#define DRA7XX_PM_DSS_DSS_WKDEP_OFFSET				0x0020
+#define DRA7XX_RM_DSS_DSS_CONTEXT_OFFSET			0x0024
+#define DRA7XX_PM_DSS_DSS2_WKDEP_OFFSET				0x0028
+#define DRA7XX_RM_DSS_BB2D_CONTEXT_OFFSET			0x0034
+#define DRA7XX_RM_DSS_SDVENC_CONTEXT_OFFSET			0x003c
+
+/* PRM.GPU_PRM register offsets */
+#define DRA7XX_PM_GPU_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_GPU_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_GPU_GPU_CONTEXT_OFFSET			0x0024
+
+/* PRM.L3INIT_PRM register offsets */
+#define DRA7XX_PM_L3INIT_PWRSTCTRL_OFFSET			0x0000
+#define DRA7XX_PM_L3INIT_PWRSTST_OFFSET				0x0004
+#define DRA7XX_PM_L3INIT_MMC1_WKDEP_OFFSET			0x0028
+#define DRA7XX_RM_L3INIT_MMC1_CONTEXT_OFFSET			0x002c
+#define DRA7XX_PM_L3INIT_MMC2_WKDEP_OFFSET			0x0030
+#define DRA7XX_RM_L3INIT_MMC2_CONTEXT_OFFSET			0x0034
+#define DRA7XX_PM_L3INIT_USB_OTG_SS2_WKDEP_OFFSET		0x0040
+#define DRA7XX_RM_L3INIT_USB_OTG_SS2_CONTEXT_OFFSET		0x0044
+#define DRA7XX_PM_L3INIT_USB_OTG_SS3_WKDEP_OFFSET		0x0048
+#define DRA7XX_RM_L3INIT_USB_OTG_SS3_CONTEXT_OFFSET		0x004c
+#define DRA7XX_PM_L3INIT_USB_OTG_SS4_WKDEP_OFFSET		0x0050
+#define DRA7XX_RM_L3INIT_USB_OTG_SS4_CONTEXT_OFFSET		0x0054
+#define DRA7XX_RM_L3INIT_MLB_SS_CONTEXT_OFFSET			0x005c
+#define DRA7XX_RM_L3INIT_IEEE1500_2_OCP_CONTEXT_OFFSET		0x007c
+#define DRA7XX_PM_L3INIT_SATA_WKDEP_OFFSET			0x0088
+#define DRA7XX_RM_L3INIT_SATA_CONTEXT_OFFSET			0x008c
+#define DRA7XX_RM_GMAC_GMAC_CONTEXT_OFFSET			0x00d4
+#define DRA7XX_RM_L3INIT_OCP2SCP1_CONTEXT_OFFSET		0x00e4
+#define DRA7XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET		0x00ec
+#define DRA7XX_PM_L3INIT_USB_OTG_SS1_WKDEP_OFFSET		0x00f0
+#define DRA7XX_RM_L3INIT_USB_OTG_SS1_CONTEXT_OFFSET		0x00f4
+
+/* PRM.L4PER_PRM register offsets */
+#define DRA7XX_PM_L4PER_PWRSTCTRL_OFFSET			0x0000
+#define DRA7XX_PM_L4PER_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_L4PER2_L4PER2_CONTEXT_OFFSET			0x000c
+#define DRA7XX_RM_L4PER3_L4PER3_CONTEXT_OFFSET			0x0014
+#define DRA7XX_RM_L4PER2_PRUSS1_CONTEXT_OFFSET			0x001c
+#define DRA7XX_RM_L4PER2_PRUSS2_CONTEXT_OFFSET			0x0024
+#define DRA7XX_PM_L4PER_TIMER10_WKDEP_OFFSET			0x0028
+#define DRA7XX_RM_L4PER_TIMER10_CONTEXT_OFFSET			0x002c
+#define DRA7XX_PM_L4PER_TIMER11_WKDEP_OFFSET			0x0030
+#define DRA7XX_RM_L4PER_TIMER11_CONTEXT_OFFSET			0x0034
+#define DRA7XX_PM_L4PER_TIMER2_WKDEP_OFFSET			0x0038
+#define DRA7XX_RM_L4PER_TIMER2_CONTEXT_OFFSET			0x003c
+#define DRA7XX_PM_L4PER_TIMER3_WKDEP_OFFSET			0x0040
+#define DRA7XX_RM_L4PER_TIMER3_CONTEXT_OFFSET			0x0044
+#define DRA7XX_PM_L4PER_TIMER4_WKDEP_OFFSET			0x0048
+#define DRA7XX_RM_L4PER_TIMER4_CONTEXT_OFFSET			0x004c
+#define DRA7XX_PM_L4PER_TIMER9_WKDEP_OFFSET			0x0050
+#define DRA7XX_RM_L4PER_TIMER9_CONTEXT_OFFSET			0x0054
+#define DRA7XX_RM_L4PER_ELM_CONTEXT_OFFSET			0x005c
+#define DRA7XX_PM_L4PER_GPIO2_WKDEP_OFFSET			0x0060
+#define DRA7XX_RM_L4PER_GPIO2_CONTEXT_OFFSET			0x0064
+#define DRA7XX_PM_L4PER_GPIO3_WKDEP_OFFSET			0x0068
+#define DRA7XX_RM_L4PER_GPIO3_CONTEXT_OFFSET			0x006c
+#define DRA7XX_PM_L4PER_GPIO4_WKDEP_OFFSET			0x0070
+#define DRA7XX_RM_L4PER_GPIO4_CONTEXT_OFFSET			0x0074
+#define DRA7XX_PM_L4PER_GPIO5_WKDEP_OFFSET			0x0078
+#define DRA7XX_RM_L4PER_GPIO5_CONTEXT_OFFSET			0x007c
+#define DRA7XX_PM_L4PER_GPIO6_WKDEP_OFFSET			0x0080
+#define DRA7XX_RM_L4PER_GPIO6_CONTEXT_OFFSET			0x0084
+#define DRA7XX_RM_L4PER_HDQ1W_CONTEXT_OFFSET			0x008c
+#define DRA7XX_RM_L4PER2_PWMSS2_CONTEXT_OFFSET			0x0094
+#define DRA7XX_RM_L4PER2_PWMSS3_CONTEXT_OFFSET			0x009c
+#define DRA7XX_PM_L4PER_I2C1_WKDEP_OFFSET			0x00a0
+#define DRA7XX_RM_L4PER_I2C1_CONTEXT_OFFSET			0x00a4
+#define DRA7XX_PM_L4PER_I2C2_WKDEP_OFFSET			0x00a8
+#define DRA7XX_RM_L4PER_I2C2_CONTEXT_OFFSET			0x00ac
+#define DRA7XX_PM_L4PER_I2C3_WKDEP_OFFSET			0x00b0
+#define DRA7XX_RM_L4PER_I2C3_CONTEXT_OFFSET			0x00b4
+#define DRA7XX_PM_L4PER_I2C4_WKDEP_OFFSET			0x00b8
+#define DRA7XX_RM_L4PER_I2C4_CONTEXT_OFFSET			0x00bc
+#define DRA7XX_RM_L4PER_L4PER1_CONTEXT_OFFSET			0x00c0
+#define DRA7XX_RM_L4PER2_PWMSS1_CONTEXT_OFFSET			0x00c4
+#define DRA7XX_PM_L4PER_TIMER13_WKDEP_OFFSET			0x00c8
+#define DRA7XX_RM_L4PER3_TIMER13_CONTEXT_OFFSET			0x00cc
+#define DRA7XX_PM_L4PER_TIMER14_WKDEP_OFFSET			0x00d0
+#define DRA7XX_RM_L4PER3_TIMER14_CONTEXT_OFFSET			0x00d4
+#define DRA7XX_PM_L4PER_TIMER15_WKDEP_OFFSET			0x00d8
+#define DRA7XX_RM_L4PER3_TIMER15_CONTEXT_OFFSET			0x00dc
+#define DRA7XX_PM_L4PER_MCSPI1_WKDEP_OFFSET			0x00f0
+#define DRA7XX_RM_L4PER_MCSPI1_CONTEXT_OFFSET			0x00f4
+#define DRA7XX_PM_L4PER_MCSPI2_WKDEP_OFFSET			0x00f8
+#define DRA7XX_RM_L4PER_MCSPI2_CONTEXT_OFFSET			0x00fc
+#define DRA7XX_PM_L4PER_MCSPI3_WKDEP_OFFSET			0x0100
+#define DRA7XX_RM_L4PER_MCSPI3_CONTEXT_OFFSET			0x0104
+#define DRA7XX_PM_L4PER_MCSPI4_WKDEP_OFFSET			0x0108
+#define DRA7XX_RM_L4PER_MCSPI4_CONTEXT_OFFSET			0x010c
+#define DRA7XX_PM_L4PER_GPIO7_WKDEP_OFFSET			0x0110
+#define DRA7XX_RM_L4PER_GPIO7_CONTEXT_OFFSET			0x0114
+#define DRA7XX_PM_L4PER_GPIO8_WKDEP_OFFSET			0x0118
+#define DRA7XX_RM_L4PER_GPIO8_CONTEXT_OFFSET			0x011c
+#define DRA7XX_PM_L4PER_MMC3_WKDEP_OFFSET			0x0120
+#define DRA7XX_RM_L4PER_MMC3_CONTEXT_OFFSET			0x0124
+#define DRA7XX_PM_L4PER_MMC4_WKDEP_OFFSET			0x0128
+#define DRA7XX_RM_L4PER_MMC4_CONTEXT_OFFSET			0x012c
+#define DRA7XX_PM_L4PER_TIMER16_WKDEP_OFFSET			0x0130
+#define DRA7XX_RM_L4PER3_TIMER16_CONTEXT_OFFSET			0x0134
+#define DRA7XX_PM_L4PER2_QSPI_WKDEP_OFFSET			0x0138
+#define DRA7XX_RM_L4PER2_QSPI_CONTEXT_OFFSET			0x013c
+#define DRA7XX_PM_L4PER_UART1_WKDEP_OFFSET			0x0140
+#define DRA7XX_RM_L4PER_UART1_CONTEXT_OFFSET			0x0144
+#define DRA7XX_PM_L4PER_UART2_WKDEP_OFFSET			0x0148
+#define DRA7XX_RM_L4PER_UART2_CONTEXT_OFFSET			0x014c
+#define DRA7XX_PM_L4PER_UART3_WKDEP_OFFSET			0x0150
+#define DRA7XX_RM_L4PER_UART3_CONTEXT_OFFSET			0x0154
+#define DRA7XX_PM_L4PER_UART4_WKDEP_OFFSET			0x0158
+#define DRA7XX_RM_L4PER_UART4_CONTEXT_OFFSET			0x015c
+#define DRA7XX_PM_L4PER2_MCASP2_WKDEP_OFFSET			0x0160
+#define DRA7XX_RM_L4PER2_MCASP2_CONTEXT_OFFSET			0x0164
+#define DRA7XX_PM_L4PER2_MCASP3_WKDEP_OFFSET			0x0168
+#define DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET			0x016c
+#define DRA7XX_PM_L4PER_UART5_WKDEP_OFFSET			0x0170
+#define DRA7XX_RM_L4PER_UART5_CONTEXT_OFFSET			0x0174
+#define DRA7XX_PM_L4PER2_MCASP5_WKDEP_OFFSET			0x0178
+#define DRA7XX_RM_L4PER2_MCASP5_CONTEXT_OFFSET			0x017c
+#define DRA7XX_PM_L4PER2_MCASP6_WKDEP_OFFSET			0x0180
+#define DRA7XX_RM_L4PER2_MCASP6_CONTEXT_OFFSET			0x0184
+#define DRA7XX_PM_L4PER2_MCASP7_WKDEP_OFFSET			0x0188
+#define DRA7XX_RM_L4PER2_MCASP7_CONTEXT_OFFSET			0x018c
+#define DRA7XX_PM_L4PER2_MCASP8_WKDEP_OFFSET			0x0190
+#define DRA7XX_RM_L4PER2_MCASP8_CONTEXT_OFFSET			0x0194
+#define DRA7XX_PM_L4PER2_MCASP4_WKDEP_OFFSET			0x0198
+#define DRA7XX_RM_L4PER2_MCASP4_CONTEXT_OFFSET			0x019c
+#define DRA7XX_RM_L4SEC_AES1_CONTEXT_OFFSET			0x01a4
+#define DRA7XX_RM_L4SEC_AES2_CONTEXT_OFFSET			0x01ac
+#define DRA7XX_RM_L4SEC_DES3DES_CONTEXT_OFFSET			0x01b4
+#define DRA7XX_RM_L4SEC_FPKA_CONTEXT_OFFSET			0x01bc
+#define DRA7XX_RM_L4SEC_RNG_CONTEXT_OFFSET			0x01c4
+#define DRA7XX_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET			0x01cc
+#define DRA7XX_PM_L4PER2_UART7_WKDEP_OFFSET			0x01d0
+#define DRA7XX_RM_L4PER2_UART7_CONTEXT_OFFSET			0x01d4
+#define DRA7XX_RM_L4SEC_DMA_CRYPTO_CONTEXT_OFFSET		0x01dc
+#define DRA7XX_PM_L4PER2_UART8_WKDEP_OFFSET			0x01e0
+#define DRA7XX_RM_L4PER2_UART8_CONTEXT_OFFSET			0x01e4
+#define DRA7XX_PM_L4PER2_UART9_WKDEP_OFFSET			0x01e8
+#define DRA7XX_RM_L4PER2_UART9_CONTEXT_OFFSET			0x01ec
+#define DRA7XX_PM_L4PER2_DCAN2_WKDEP_OFFSET			0x01f0
+#define DRA7XX_RM_L4PER2_DCAN2_CONTEXT_OFFSET			0x01f4
+#define DRA7XX_RM_L4SEC_SHA2MD52_CONTEXT_OFFSET			0x01fc
+
+/* PRM.CUSTEFUSE_PRM register offsets */
+#define DRA7XX_PM_CUSTEFUSE_PWRSTCTRL_OFFSET			0x0000
+#define DRA7XX_PM_CUSTEFUSE_PWRSTST_OFFSET			0x0004
+#define DRA7XX_RM_CUSTEFUSE_EFUSE_CTRL_CUST_CONTEXT_OFFSET	0x0024
+
+/* PRM.WKUPAON_PRM register offsets */
+#define DRA7XX_RM_WKUPAON_L4_WKUP_CONTEXT_OFFSET		0x0000
+#define DRA7XX_PM_WKUPAON_WD_TIMER1_WKDEP_OFFSET		0x0004
+#define DRA7XX_RM_WKUPAON_WD_TIMER1_CONTEXT_OFFSET		0x0008
+#define DRA7XX_PM_WKUPAON_WD_TIMER2_WKDEP_OFFSET		0x000c
+#define DRA7XX_RM_WKUPAON_WD_TIMER2_CONTEXT_OFFSET		0x0010
+#define DRA7XX_PM_WKUPAON_GPIO1_WKDEP_OFFSET			0x0014
+#define DRA7XX_RM_WKUPAON_GPIO1_CONTEXT_OFFSET			0x0018
+#define DRA7XX_PM_WKUPAON_TIMER1_WKDEP_OFFSET			0x001c
+#define DRA7XX_RM_WKUPAON_TIMER1_CONTEXT_OFFSET			0x0020
+#define DRA7XX_PM_WKUPAON_TIMER12_WKDEP_OFFSET			0x0024
+#define DRA7XX_RM_WKUPAON_TIMER12_CONTEXT_OFFSET		0x0028
+#define DRA7XX_RM_WKUPAON_COUNTER_32K_CONTEXT_OFFSET		0x0030
+#define DRA7XX_RM_WKUPAON_SAR_RAM_CONTEXT_OFFSET		0x0040
+#define DRA7XX_PM_WKUPAON_KBD_WKDEP_OFFSET			0x0054
+#define DRA7XX_RM_WKUPAON_KBD_CONTEXT_OFFSET			0x0058
+#define DRA7XX_PM_WKUPAON_UART10_WKDEP_OFFSET			0x005c
+#define DRA7XX_RM_WKUPAON_UART10_CONTEXT_OFFSET			0x0060
+#define DRA7XX_PM_WKUPAON_DCAN1_WKDEP_OFFSET			0x0064
+#define DRA7XX_RM_WKUPAON_DCAN1_CONTEXT_OFFSET			0x0068
+#define DRA7XX_PM_WKUPAON_ADC_WKDEP_OFFSET			0x007c
+#define DRA7XX_RM_WKUPAON_ADC_CONTEXT_OFFSET			0x0080
+#define DRA7XX_RM_WKUPAON_SPARE_SAFETY1_CONTEXT_OFFSET		0x0090
+#define DRA7XX_RM_WKUPAON_SPARE_SAFETY2_CONTEXT_OFFSET		0x0098
+#define DRA7XX_RM_WKUPAON_SPARE_SAFETY3_CONTEXT_OFFSET		0x00a0
+#define DRA7XX_RM_WKUPAON_SPARE_SAFETY4_CONTEXT_OFFSET		0x00a8
+#define DRA7XX_RM_WKUPAON_SPARE_UNKNOWN2_CONTEXT_OFFSET		0x00b0
+#define DRA7XX_RM_WKUPAON_SPARE_UNKNOWN3_CONTEXT_OFFSET		0x00b8
+
+/* PRM.WKUPAON_CM register offsets */
+#define DRA7XX_CM_WKUPAON_CLKSTCTRL_OFFSET			0x0000
+#define DRA7XX_CM_WKUPAON_L4_WKUP_CLKCTRL_OFFSET		0x0020
+#define DRA7XX_CM_WKUPAON_L4_WKUP_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0020)
+#define DRA7XX_CM_WKUPAON_WD_TIMER1_CLKCTRL_OFFSET		0x0028
+#define DRA7XX_CM_WKUPAON_WD_TIMER1_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0028)
+#define DRA7XX_CM_WKUPAON_WD_TIMER2_CLKCTRL_OFFSET		0x0030
+#define DRA7XX_CM_WKUPAON_WD_TIMER2_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0030)
+#define DRA7XX_CM_WKUPAON_GPIO1_CLKCTRL_OFFSET			0x0038
+#define DRA7XX_CM_WKUPAON_GPIO1_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0038)
+#define DRA7XX_CM_WKUPAON_TIMER1_CLKCTRL_OFFSET			0x0040
+#define DRA7XX_CM_WKUPAON_TIMER1_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0040)
+#define DRA7XX_CM_WKUPAON_TIMER12_CLKCTRL_OFFSET		0x0048
+#define DRA7XX_CM_WKUPAON_TIMER12_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0048)
+#define DRA7XX_CM_WKUPAON_COUNTER_32K_CLKCTRL_OFFSET		0x0050
+#define DRA7XX_CM_WKUPAON_COUNTER_32K_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0050)
+#define DRA7XX_CM_WKUPAON_SAR_RAM_CLKCTRL_OFFSET		0x0060
+#define DRA7XX_CM_WKUPAON_SAR_RAM_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0060)
+#define DRA7XX_CM_WKUPAON_KBD_CLKCTRL_OFFSET			0x0078
+#define DRA7XX_CM_WKUPAON_KBD_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0078)
+#define DRA7XX_CM_WKUPAON_UART10_CLKCTRL_OFFSET			0x0080
+#define DRA7XX_CM_WKUPAON_UART10_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0080)
+#define DRA7XX_CM_WKUPAON_DCAN1_CLKCTRL_OFFSET			0x0088
+#define DRA7XX_CM_WKUPAON_DCAN1_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0088)
+#define DRA7XX_CM_WKUPAON_SCRM_CLKCTRL_OFFSET			0x0090
+#define DRA7XX_CM_WKUPAON_SCRM_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0090)
+#define DRA7XX_CM_WKUPAON_IO_SRCOMP_CLKCTRL_OFFSET		0x0098
+#define DRA7XX_CM_WKUPAON_IO_SRCOMP_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x0098)
+#define DRA7XX_CM_WKUPAON_ADC_CLKCTRL_OFFSET			0x00a0
+#define DRA7XX_CM_WKUPAON_ADC_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00a0)
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY1_CLKCTRL_OFFSET		0x00b0
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY1_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00b0)
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY2_CLKCTRL_OFFSET		0x00b8
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY2_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00b8)
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY3_CLKCTRL_OFFSET		0x00c0
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY3_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00c0)
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY4_CLKCTRL_OFFSET		0x00c8
+#define DRA7XX_CM_WKUPAON_SPARE_SAFETY4_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00c8)
+#define DRA7XX_CM_WKUPAON_SPARE_UNKNOWN2_CLKCTRL_OFFSET		0x00d0
+#define DRA7XX_CM_WKUPAON_SPARE_UNKNOWN2_CLKCTRL		DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00d0)
+#define DRA7XX_CM_WKUPAON_SPARE_UNKNOWN3_CLKCTRL_OFFSET		0x00d8
+#define DRA7XX_CM_WKUPAON_SPARE_UNKNOWN3_CLKCTRL		DRA7XX_PRM_REGADDR(DRA7XX_PRM_WKUPAON_CM_INST, 0x00d8)
+
+/* PRM.EMU_PRM register offsets */
+#define DRA7XX_PM_EMU_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_EMU_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_EMU_DEBUGSS_CONTEXT_OFFSET			0x0024
+
+/* PRM.EMU_CM register offsets */
+#define DRA7XX_CM_EMU_CLKSTCTRL_OFFSET				0x0000
+#define DRA7XX_CM_EMU_DEBUGSS_CLKCTRL_OFFSET			0x0004
+#define DRA7XX_CM_EMU_DEBUGSS_CLKCTRL				DRA7XX_PRM_REGADDR(DRA7XX_PRM_EMU_CM_INST, 0x0004)
+#define DRA7XX_CM_EMU_DYNAMICDEP_OFFSET				0x0008
+#define DRA7XX_CM_EMU_MPU_EMU_DBG_CLKCTRL_OFFSET		0x000c
+#define DRA7XX_CM_EMU_MPU_EMU_DBG_CLKCTRL			DRA7XX_PRM_REGADDR(DRA7XX_PRM_EMU_CM_INST, 0x000c)
+
+/* PRM.DSP2_PRM register offsets */
+#define DRA7XX_PM_DSP2_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_DSP2_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_DSP2_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_DSP2_RSTST_OFFSET				0x0014
+#define DRA7XX_RM_DSP2_DSP2_CONTEXT_OFFSET			0x0024
+
+/* PRM.EVE1_PRM register offsets */
+#define DRA7XX_PM_EVE1_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_EVE1_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_EVE1_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_EVE1_RSTST_OFFSET				0x0014
+#define DRA7XX_PM_EVE1_EVE1_WKDEP_OFFSET			0x0020
+#define DRA7XX_RM_EVE1_EVE1_CONTEXT_OFFSET			0x0024
+
+/* PRM.EVE2_PRM register offsets */
+#define DRA7XX_PM_EVE2_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_EVE2_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_EVE2_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_EVE2_RSTST_OFFSET				0x0014
+#define DRA7XX_PM_EVE2_EVE2_WKDEP_OFFSET			0x0020
+#define DRA7XX_RM_EVE2_EVE2_CONTEXT_OFFSET			0x0024
+
+/* PRM.EVE3_PRM register offsets */
+#define DRA7XX_PM_EVE3_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_EVE3_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_EVE3_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_EVE3_RSTST_OFFSET				0x0014
+#define DRA7XX_PM_EVE3_EVE3_WKDEP_OFFSET			0x0020
+#define DRA7XX_RM_EVE3_EVE3_CONTEXT_OFFSET			0x0024
+
+/* PRM.EVE4_PRM register offsets */
+#define DRA7XX_PM_EVE4_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_EVE4_PWRSTST_OFFSET				0x0004
+#define DRA7XX_RM_EVE4_RSTCTRL_OFFSET				0x0010
+#define DRA7XX_RM_EVE4_RSTST_OFFSET				0x0014
+#define DRA7XX_PM_EVE4_EVE4_WKDEP_OFFSET			0x0020
+#define DRA7XX_RM_EVE4_EVE4_CONTEXT_OFFSET			0x0024
+
+/* PRM.RTC_PRM register offsets */
+#define DRA7XX_PM_RTC_RTCSS_WKDEP_OFFSET			0x0000
+#define DRA7XX_RM_RTC_RTCSS_CONTEXT_OFFSET			0x0004
+
+/* PRM.VPE_PRM register offsets */
+#define DRA7XX_PM_VPE_PWRSTCTRL_OFFSET				0x0000
+#define DRA7XX_PM_VPE_PWRSTST_OFFSET				0x0004
+#define DRA7XX_PM_VPE_VPE_WKDEP_OFFSET				0x0020
+#define DRA7XX_RM_VPE_VPE_CONTEXT_OFFSET			0x0024
+
+/* PRM.DEVICE_PRM register offsets */
+#define DRA7XX_PRM_RSTCTRL_OFFSET				0x0000
+#define DRA7XX_PRM_RSTST_OFFSET					0x0004
+#define DRA7XX_PRM_RSTTIME_OFFSET				0x0008
+#define DRA7XX_PRM_CLKREQCTRL_OFFSET				0x000c
+#define DRA7XX_PRM_VOLTCTRL_OFFSET				0x0010
+#define DRA7XX_PRM_PWRREQCTRL_OFFSET				0x0014
+#define DRA7XX_PRM_PSCON_COUNT_OFFSET				0x0018
+#define DRA7XX_PRM_IO_COUNT_OFFSET				0x001c
+#define DRA7XX_PRM_IO_PMCTRL_OFFSET				0x0020
+#define DRA7XX_PRM_VOLTSETUP_WARMRESET_OFFSET			0x0024
+#define DRA7XX_PRM_VOLTSETUP_CORE_OFF_OFFSET			0x0028
+#define DRA7XX_PRM_VOLTSETUP_MPU_OFF_OFFSET			0x002c
+#define DRA7XX_PRM_VOLTSETUP_MM_OFF_OFFSET			0x0030
+#define DRA7XX_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET		0x0034
+#define DRA7XX_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET		0x0038
+#define DRA7XX_PRM_VOLTSETUP_MM_RET_SLEEP_OFFSET		0x003c
+#define DRA7XX_PRM_SRAM_COUNT_OFFSET				0x00bc
+#define DRA7XX_PRM_SRAM_WKUP_SETUP_OFFSET			0x00c0
+#define DRA7XX_PRM_SLDO_CORE_SETUP_OFFSET			0x00c4
+#define DRA7XX_PRM_SLDO_CORE_CTRL_OFFSET			0x00c8
+#define DRA7XX_PRM_SLDO_MPU_SETUP_OFFSET			0x00cc
+#define DRA7XX_PRM_SLDO_MPU_CTRL_OFFSET				0x00d0
+#define DRA7XX_PRM_SLDO_GPU_SETUP_OFFSET			0x00d4
+#define DRA7XX_PRM_SLDO_GPU_CTRL_OFFSET				0x00d8
+#define DRA7XX_PRM_ABBLDO_MPU_SETUP_OFFSET			0x00dc
+#define DRA7XX_PRM_ABBLDO_MPU_CTRL_OFFSET			0x00e0
+#define DRA7XX_PRM_ABBLDO_GPU_SETUP_OFFSET			0x00e4
+#define DRA7XX_PRM_ABBLDO_GPU_CTRL_OFFSET			0x00e8
+#define DRA7XX_PRM_BANDGAP_SETUP_OFFSET				0x00ec
+#define DRA7XX_PRM_DEVICE_OFF_CTRL_OFFSET			0x00f0
+#define DRA7XX_PRM_PHASE1_CNDP_OFFSET				0x00f4
+#define DRA7XX_PRM_PHASE2A_CNDP_OFFSET				0x00f8
+#define DRA7XX_PRM_PHASE2B_CNDP_OFFSET				0x00fc
+#define DRA7XX_PRM_MODEM_IF_CTRL_OFFSET				0x0100
+#define DRA7XX_PRM_VOLTST_MPU_OFFSET				0x0110
+#define DRA7XX_PRM_VOLTST_MM_OFFSET				0x0114
+#define DRA7XX_PRM_SLDO_DSPEVE_SETUP_OFFSET			0x0118
+#define DRA7XX_PRM_SLDO_IVA_SETUP_OFFSET			0x011c
+#define DRA7XX_PRM_ABBLDO_DSPEVE_CTRL_OFFSET			0x0120
+#define DRA7XX_PRM_ABBLDO_IVA_CTRL_OFFSET			0x0124
+#define DRA7XX_PRM_SLDO_DSPEVE_CTRL_OFFSET			0x0128
+#define DRA7XX_PRM_SLDO_IVA_CTRL_OFFSET				0x012c
+#define DRA7XX_PRM_ABBLDO_DSPEVE_SETUP_OFFSET			0x0130
+#define DRA7XX_PRM_ABBLDO_IVA_SETUP_OFFSET			0x0134
+
+#endif
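
The WKUPAON_CM block above defines each register twice: as a relative
offset and, via DRA7XX_PRM_REGADDR(), as an absolute mapped address. A
minimal sketch of how the two forms are typically consumed -- the
DRA7XX_PRM_PARTITION constant is assumed to exist alongside the
instance defines and is not part of this hunk:

	/* Offset form: routed through the instance-aware PRM accessors. */
	u32 v = omap4_prminst_read_inst_reg(DRA7XX_PRM_PARTITION,
					    DRA7XX_PRM_WKUPAON_CM_INST,
					    DRA7XX_CM_WKUPAON_GPIO1_CLKCTRL_OFFSET);

	/* Address form: the non-_OFFSET macro already folds in the
	 * instance base, for code doing raw readl()/writel(). */
	v = readl_relaxed(DRA7XX_CM_WKUPAON_GPIO1_CLKCTRL);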
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index c12320c..6334b96 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -20,10 +20,13 @@
 #include "common.h"
 #include "prcm-common.h"
 #include "prm44xx.h"
+#include "prm54xx.h"
+#include "prm7xx.h"
 #include "prminst44xx.h"
 #include "prm-regbits-44xx.h"
 #include "prcm44xx.h"
 #include "prcm_mpu44xx.h"
+#include "soc.h"
 
 static void __iomem *_prm_bases[OMAP4_MAX_PRCM_PARTITIONS];
 
@@ -165,10 +168,19 @@
 void omap4_prminst_global_warm_sw_reset(void)
 {
 	u32 v;
+	s16 dev_inst;
 
-	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
-				    OMAP4430_PRM_DEVICE_INST,
-				    OMAP4_PRM_RSTCTRL_OFFSET);
+	if (cpu_is_omap44xx())
+		dev_inst = OMAP4430_PRM_DEVICE_INST;
+	else if (soc_is_omap54xx())
+		dev_inst = OMAP54XX_PRM_DEVICE_INST;
+	else if (soc_is_dra7xx())
+		dev_inst = DRA7XX_PRM_DEVICE_INST;
+	else
+		return;
+
+	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, dev_inst,
+					OMAP4_PRM_RSTCTRL_OFFSET);
 	v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
 	omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
 				 OMAP4430_PRM_DEVICE_INST,
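
For readability, here is how omap4_prminst_global_warm_sw_reset() reads
once this hunk is applied -- a sketch assembled from the lines above;
the final argument of the write is assumed to mirror the read, since
the hunk context ends before it. Note that the write-back is not
converted by this hunk and still passes OMAP4430_PRM_DEVICE_INST rather
than dev_inst:

	void omap4_prminst_global_warm_sw_reset(void)
	{
		u32 v;
		s16 dev_inst;

		/* Pick the per-SoC PRM device instance; unknown SoCs bail out. */
		if (cpu_is_omap44xx())
			dev_inst = OMAP4430_PRM_DEVICE_INST;
		else if (soc_is_omap54xx())
			dev_inst = OMAP54XX_PRM_DEVICE_INST;
		else if (soc_is_dra7xx())
			dev_inst = DRA7XX_PRM_DEVICE_INST;
		else
			return;

		v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, dev_inst,
						OMAP4_PRM_RSTCTRL_OFFSET);
		v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
		omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
					     OMAP4430_PRM_DEVICE_INST,
					     OMAP4_PRM_RSTCTRL_OFFSET); /* offset assumed */
	}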
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index e817fde..1f94c31 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -109,18 +109,22 @@
 
 comment "SH-Mobile Board Type"
 
-config MACH_AG5EVM
-	bool "AG5EVM board"
-	depends on ARCH_SH73A0
-	select ARCH_REQUIRE_GPIOLIB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
-	select SH_LCD_MIPI_DSI
-
 config MACH_APE6EVM
 	bool "APE6EVM board"
 	depends on ARCH_R8A73A4
 	select USE_OF
 
+config MACH_APE6EVM_REFERENCE
+	bool "APE6EVM board - Reference Device Tree Implementation"
+	depends on ARCH_R8A73A4
+	select USE_OF
+	---help---
+	   Use the reference implementation of APE6EVM board support,
+	   which makes greater use of the device tree at the expense
+	   of not supporting a number of devices.
+
+	   This is intended to aid developers.
+
 config MACH_MACKEREL
 	bool "mackerel board"
 	depends on ARCH_SH7372
@@ -129,12 +133,6 @@
 	select SND_SOC_AK4642 if SND_SIMPLE_CARD
 	select USE_OF
 
-config MACH_KOTA2
-	bool "KOTA2 board"
-	depends on ARCH_SH73A0
-	select ARCH_REQUIRE_GPIOLIB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
-
 config MACH_ARMADILLO800EVA
 	bool "Armadillo-800 EVA board"
 	depends on ARCH_R8A7740
@@ -165,11 +163,26 @@
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 	select USE_OF
 
+config MACH_BOCKW_REFERENCE
+	bool "BOCK-W - Reference Device Tree Implementation"
+	depends on ARCH_R8A7778
+	select ARCH_REQUIRE_GPIOLIB
+	select RENESAS_INTC_IRQPIN
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
+	select USE_OF
+	---help---
+	   Use the reference implementation of BockW board support,
+	   which makes use of the device tree at the expense
+	   of not supporting a number of devices.
+
+	   This is intended to aid developers.
+
 config MACH_MARZEN
 	bool "MARZEN board"
 	depends on ARCH_R8A7779
 	select ARCH_REQUIRE_GPIOLIB
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
+	select USE_OF
 
 config MACH_MARZEN_REFERENCE
 	bool "MARZEN board - Reference Device Tree Implementation"
@@ -189,6 +202,17 @@
 	depends on ARCH_R8A7790
 	select USE_OF
 
+config MACH_LAGER_REFERENCE
+	bool "Lager board - Reference Device Tree Implementation"
+	depends on ARCH_R8A7790
+	select USE_OF
+	---help---
+	   Use the reference implementation of Lager board support,
+	   which makes use of the device tree at the expense
+	   of not supporting a number of devices.
+
+	   This is intended to aid developers.
+
 config MACH_KZM9D
 	bool "KZM9D board"
 	depends on ARCH_EMEV2
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index b150c45..2705bfa 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -11,9 +11,9 @@
 obj-$(CONFIG_ARCH_SH7372)	+= setup-sh7372.o intc-sh7372.o
 obj-$(CONFIG_ARCH_SH73A0)	+= setup-sh73a0.o intc-sh73a0.o
 obj-$(CONFIG_ARCH_R8A73A4)	+= setup-r8a73a4.o
-obj-$(CONFIG_ARCH_R8A7740)	+= setup-r8a7740.o intc-r8a7740.o
+obj-$(CONFIG_ARCH_R8A7740)	+= setup-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7778)	+= setup-r8a7778.o
-obj-$(CONFIG_ARCH_R8A7779)	+= setup-r8a7779.o intc-r8a7779.o
+obj-$(CONFIG_ARCH_R8A7779)	+= setup-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)	+= setup-r8a7790.o
 obj-$(CONFIG_ARCH_EMEV2)	+= setup-emev2.o
 
@@ -32,32 +32,31 @@
 
 # SMP objects
 smp-y				:= platsmp.o headsmp.o
-smp-$(CONFIG_ARCH_SH73A0)	+= smp-sh73a0.o headsmp-scu.o
-smp-$(CONFIG_ARCH_R8A7779)	+= smp-r8a7779.o headsmp-scu.o
-smp-$(CONFIG_ARCH_EMEV2)	+= smp-emev2.o headsmp-scu.o
+smp-$(CONFIG_ARCH_SH73A0)	+= smp-sh73a0.o headsmp-scu.o platsmp-scu.o
+smp-$(CONFIG_ARCH_R8A7779)	+= smp-r8a7779.o headsmp-scu.o platsmp-scu.o
+smp-$(CONFIG_ARCH_EMEV2)	+= smp-emev2.o headsmp-scu.o platsmp-scu.o
 
 # IRQ objects
 obj-$(CONFIG_ARCH_SH7372)	+= entry-intc.o
-obj-$(CONFIG_ARCH_R8A7740)	+= entry-intc.o
 
 # PM objects
 obj-$(CONFIG_SUSPEND)		+= suspend.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
-obj-$(CONFIG_ARCH_SHMOBILE)	+= pm-rmobile.o
-obj-$(CONFIG_ARCH_SH7372)	+= pm-sh7372.o sleep-sh7372.o
-obj-$(CONFIG_ARCH_R8A7740)	+= pm-r8a7740.o
-obj-$(CONFIG_ARCH_R8A7779)	+= pm-r8a7779.o
+obj-$(CONFIG_ARCH_SH7372)	+= pm-sh7372.o sleep-sh7372.o pm-rmobile.o
 obj-$(CONFIG_ARCH_SH73A0)	+= pm-sh73a0.o
+obj-$(CONFIG_ARCH_R8A7740)	+= pm-r8a7740.o pm-rmobile.o
+obj-$(CONFIG_ARCH_R8A7779)	+= pm-r8a7779.o
 
 # Board objects
-obj-$(CONFIG_MACH_AG5EVM)	+= board-ag5evm.o
 obj-$(CONFIG_MACH_APE6EVM)	+= board-ape6evm.o
+obj-$(CONFIG_MACH_APE6EVM_REFERENCE)	+= board-ape6evm-reference.o
 obj-$(CONFIG_MACH_MACKEREL)	+= board-mackerel.o
-obj-$(CONFIG_MACH_KOTA2)	+= board-kota2.o
 obj-$(CONFIG_MACH_BOCKW)	+= board-bockw.o
+obj-$(CONFIG_MACH_BOCKW_REFERENCE)	+= board-bockw-reference.o
 obj-$(CONFIG_MACH_MARZEN)	+= board-marzen.o
 obj-$(CONFIG_MACH_MARZEN_REFERENCE)	+= board-marzen-reference.o
 obj-$(CONFIG_MACH_LAGER)	+= board-lager.o
+obj-$(CONFIG_MACH_LAGER_REFERENCE)	+= board-lager-reference.o
 obj-$(CONFIG_MACH_ARMADILLO800EVA)	+= board-armadillo800eva.o
 obj-$(CONFIG_MACH_ARMADILLO800EVA_REFERENCE)	+= board-armadillo800eva-reference.o
 obj-$(CONFIG_MACH_KZM9D)	+= board-kzm9d.o
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index 7785c52..6a504fe 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -1,16 +1,17 @@
 # per-board load address for uImage
 loadaddr-y	:=
-loadaddr-$(CONFIG_MACH_AG5EVM) += 0x40008000
 loadaddr-$(CONFIG_MACH_APE6EVM) += 0x40008000
+loadaddr-$(CONFIG_MACH_APE6EVM_REFERENCE) += 0x40008000
 loadaddr-$(CONFIG_MACH_ARMADILLO800EVA) += 0x40008000
 loadaddr-$(CONFIG_MACH_ARMADILLO800EVA_REFERENCE) += 0x40008000
 loadaddr-$(CONFIG_MACH_BOCKW) += 0x60008000
-loadaddr-$(CONFIG_MACH_KOTA2) += 0x41008000
+loadaddr-$(CONFIG_MACH_BOCKW_REFERENCE) += 0x60008000
 loadaddr-$(CONFIG_MACH_KZM9D) += 0x40008000
 loadaddr-$(CONFIG_MACH_KZM9D_REFERENCE) += 0x40008000
 loadaddr-$(CONFIG_MACH_KZM9G) += 0x41008000
 loadaddr-$(CONFIG_MACH_KZM9G_REFERENCE) += 0x41008000
 loadaddr-$(CONFIG_MACH_LAGER) += 0x40008000
+loadaddr-$(CONFIG_MACH_LAGER_REFERENCE) += 0x40008000
 loadaddr-$(CONFIG_MACH_MACKEREL) += 0x40008000
 loadaddr-$(CONFIG_MACH_MARZEN) += 0x60008000
 loadaddr-$(CONFIG_MACH_MARZEN_REFERENCE) += 0x60008000
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
deleted file mode 100644
index f6d6449..0000000
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ /dev/null
@@ -1,639 +0,0 @@
-/*
- * arch/arm/mach-shmobile/board-ag5evm.c
- *
- * Copyright (C) 2010  Takashi Yoshii <yoshii.takashi.zj@renesas.com>
- * Copyright (C) 2009  Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/serial_sci.h>
-#include <linux/smsc911x.h>
-#include <linux/gpio.h>
-#include <linux/videodev2.h>
-#include <linux/input.h>
-#include <linux/input/sh_keysc.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/mfd/tmio.h>
-#include <linux/platform_data/bd6107.h>
-#include <linux/sh_clk.h>
-#include <linux/irqchip/arm-gic.h>
-#include <video/sh_mobile_lcdc.h>
-#include <video/sh_mipi_dsi.h>
-#include <sound/sh_fsi.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-#include <mach/sh73a0.h>
-#include <mach/common.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/traps.h>
-
-/* Dummy supplies, where voltage doesn't matter */
-static struct regulator_consumer_supply dummy_supplies[] = {
-	REGULATOR_SUPPLY("vddvario", "smsc911x"),
-	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
-};
-
-static struct resource smsc9220_resources[] = {
-	[0] = {
-		.start		= 0x14000000,
-		.end		= 0x14000000 + SZ_64K - 1,
-		.flags		= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start		= SH73A0_PINT0_IRQ(2), /* PINTA2 */
-		.flags		= IORESOURCE_IRQ,
-	},
-};
-
-static struct smsc911x_platform_config smsc9220_platdata = {
-	.flags		= SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-	.phy_interface	= PHY_INTERFACE_MODE_MII,
-	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
-};
-
-static struct platform_device eth_device = {
-	.name		= "smsc911x",
-	.id		= 0,
-	.dev  = {
-		.platform_data = &smsc9220_platdata,
-	},
-	.resource	= smsc9220_resources,
-	.num_resources	= ARRAY_SIZE(smsc9220_resources),
-};
-
-static struct sh_keysc_info keysc_platdata = {
-	.mode		= SH_KEYSC_MODE_6,
-	.scan_timing	= 3,
-	.delay		= 100,
-	.keycodes	= {
-		KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G,
-		KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N,
-		KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
-		KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP,
-		KEY_SPACE, KEY_9, KEY_6, KEY_3, KEY_WAKEUP, KEY_RIGHT, \
-		KEY_COFFEE,
-		KEY_0, KEY_8, KEY_5, KEY_2, KEY_DOWN, KEY_ENTER, KEY_UP,
-		KEY_KPASTERISK, KEY_7, KEY_4, KEY_1, KEY_STOP, KEY_LEFT, \
-		KEY_COMPUTER,
-	},
-};
-
-static struct resource keysc_resources[] = {
-	[0] = {
-		.name	= "KEYSC",
-		.start	= 0xe61b0000,
-		.end	= 0xe61b0098 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(71),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device keysc_device = {
-	.name		= "sh_keysc",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(keysc_resources),
-	.resource	= keysc_resources,
-	.dev		= {
-		.platform_data	= &keysc_platdata,
-	},
-};
-
-/* FSI A */
-static struct resource fsi_resources[] = {
-	[0] = {
-		.name	= "FSI",
-		.start	= 0xEC230000,
-		.end	= 0xEC230400 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(146),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device fsi_device = {
-	.name		= "sh_fsi2",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(fsi_resources),
-	.resource	= fsi_resources,
-};
-
-/* Fixed 1.8V regulator to be used by MMCIF */
-static struct regulator_consumer_supply fixed1v8_power_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
-};
-
-static struct resource sh_mmcif_resources[] = {
-	[0] = {
-		.name	= "MMCIF",
-		.start	= 0xe6bd0000,
-		.end	= 0xe6bd00ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(141),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start	= gic_spi(140),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct sh_mmcif_plat_data sh_mmcif_platdata = {
-	.sup_pclk	= 0,
-	.ocr		= MMC_VDD_165_195,
-	.caps		= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
-	.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,
-	.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
-};
-
-static struct platform_device mmc_device = {
-	.name		= "sh_mmcif",
-	.id		= 0,
-	.dev		= {
-		.dma_mask		= NULL,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &sh_mmcif_platdata,
-	},
-	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
-	.resource	= sh_mmcif_resources,
-};
-
-/* IrDA */
-static struct resource irda_resources[] = {
-	[0] = {
-		.start	= 0xE6D00000,
-		.end	= 0xE6D01FD4 - 1,
-		.flags  = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(95),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device irda_device = {
-	.name           = "sh_irda",
-	.id		= 0,
-	.resource       = irda_resources,
-	.num_resources  = ARRAY_SIZE(irda_resources),
-};
-
-/* MIPI-DSI */
-static struct resource mipidsi0_resources[] = {
-	[0] = {
-		.name	= "DSI0",
-		.start  = 0xfeab0000,
-		.end    = 0xfeab3fff,
-		.flags  = IORESOURCE_MEM,
-	},
-	[1] = {
-		.name	= "DSI0",
-		.start  = 0xfeab4000,
-		.end    = 0xfeab7fff,
-		.flags  = IORESOURCE_MEM,
-	},
-};
-
-static int sh_mipi_set_dot_clock(struct platform_device *pdev,
-				 void __iomem *base,
-				 int enable)
-{
-	struct clk *pck, *phy;
-	int ret;
-
-	pck = clk_get(&pdev->dev, "dsip_clk");
-	if (IS_ERR(pck)) {
-		ret = PTR_ERR(pck);
-		goto sh_mipi_set_dot_clock_pck_err;
-	}
-
-	phy = clk_get(&pdev->dev, "dsiphy_clk");
-	if (IS_ERR(phy)) {
-		ret = PTR_ERR(phy);
-		goto sh_mipi_set_dot_clock_phy_err;
-	}
-
-	if (enable) {
-		clk_set_rate(pck, clk_round_rate(pck,  24000000));
-		clk_set_rate(phy, clk_round_rate(pck, 510000000));
-		clk_enable(pck);
-		clk_enable(phy);
-	} else {
-		clk_disable(pck);
-		clk_disable(phy);
-	}
-
-	ret = 0;
-
-	clk_put(phy);
-sh_mipi_set_dot_clock_phy_err:
-	clk_put(pck);
-sh_mipi_set_dot_clock_pck_err:
-	return ret;
-}
-
-static struct sh_mipi_dsi_info mipidsi0_info = {
-	.data_format	= MIPI_RGB888,
-	.channel	= LCDC_CHAN_MAINLCD,
-	.lane		= 2,
-	.vsynw_offset	= 20,
-	.clksrc		= 1,
-	.flags		= SH_MIPI_DSI_HSABM		|
-			  SH_MIPI_DSI_SYNC_PULSES_MODE	|
-			  SH_MIPI_DSI_HSbyteCLK,
-	.set_dot_clock	= sh_mipi_set_dot_clock,
-};
-
-static struct platform_device mipidsi0_device = {
-	.name           = "sh-mipi-dsi",
-	.num_resources  = ARRAY_SIZE(mipidsi0_resources),
-	.resource       = mipidsi0_resources,
-	.id             = 0,
-	.dev	= {
-		.platform_data	= &mipidsi0_info,
-	},
-};
-
-/* LCDC0 and backlight */
-static const struct fb_videomode lcdc0_modes[] = {
-	{
-		.name		= "R63302(QHD)",
-		.xres		= 544,
-		.yres		= 961,
-		.left_margin	= 72,
-		.right_margin	= 600,
-		.hsync_len	= 16,
-		.upper_margin	= 8,
-		.lower_margin	= 8,
-		.vsync_len	= 2,
-		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
-	},
-};
-
-static struct sh_mobile_lcdc_info lcdc0_info = {
-	.clock_source = LCDC_CLK_PERIPHERAL,
-	.ch[0] = {
-		.chan = LCDC_CHAN_MAINLCD,
-		.interface_type = RGB24,
-		.clock_divider = 1,
-		.flags = LCDC_FLAGS_DWPOL,
-		.fourcc = V4L2_PIX_FMT_RGB565,
-		.lcd_modes = lcdc0_modes,
-		.num_modes = ARRAY_SIZE(lcdc0_modes),
-		.panel_cfg = {
-			.width = 44,
-			.height = 79,
-		},
-		.tx_dev = &mipidsi0_device,
-	}
-};
-
-static struct resource lcdc0_resources[] = {
-	[0] = {
-		.name	= "LCDC0",
-		.start	= 0xfe940000, /* P4-only space */
-		.end	= 0xfe943fff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= intcs_evt2irq(0x580),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device lcdc0_device = {
-	.name		= "sh_mobile_lcdc_fb",
-	.num_resources	= ARRAY_SIZE(lcdc0_resources),
-	.resource	= lcdc0_resources,
-	.id             = 0,
-	.dev	= {
-		.platform_data	= &lcdc0_info,
-		.coherent_dma_mask = ~0,
-	},
-};
-
-static struct bd6107_platform_data backlight_data = {
-	.fbdev = &lcdc0_device.dev,
-	.reset = 235,
-	.def_value = 0,
-};
-
-static struct i2c_board_info backlight_board_info = {
-	I2C_BOARD_INFO("bd6107", 0x6d),
-	.platform_data = &backlight_data,
-};
-
-/* Fixed 2.8V regulators to be used by SDHI0 */
-static struct regulator_consumer_supply fixed2v8_power_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
-};
-
-/* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi0_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
-	.tmio_ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
-	.cd_gpio	= 251,
-};
-
-static struct resource sdhi0_resources[] = {
-	[0] = {
-		.name	= "SDHI0",
-		.start	= 0xee100000,
-		.end	= 0xee1000ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.name	= SH_MOBILE_SDHI_IRQ_CARD_DETECT,
-		.start	= gic_spi(83),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDCARD,
-		.start	= gic_spi(84),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[3] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDIO,
-		.start	= gic_spi(85),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi0_device = {
-	.name		= "sh_mobile_sdhi",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(sdhi0_resources),
-	.resource	= sdhi0_resources,
-	.dev	= {
-		.platform_data	= &sdhi0_info,
-	},
-};
-
-/* Fixed 3.3V regulator to be used by SDHI1 */
-static struct regulator_consumer_supply cn4_power_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
-};
-
-static struct regulator_init_data cn4_power_init_data = {
-	.constraints = {
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(cn4_power_consumers),
-	.consumer_supplies      = cn4_power_consumers,
-};
-
-static struct fixed_voltage_config cn4_power_info = {
-	.supply_name = "CN4 SD/MMC Vdd",
-	.microvolts = 3300000,
-	.gpio = 114,
-	.enable_high = 1,
-	.init_data = &cn4_power_init_data,
-};
-
-static struct platform_device cn4_power = {
-	.name = "reg-fixed-voltage",
-	.id   = 2,
-	.dev  = {
-		.platform_data = &cn4_power_info,
-	},
-};
-
-static void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
-{
-	static int power_gpio = -EINVAL;
-
-	if (power_gpio < 0) {
-		int ret = gpio_request_one(114, GPIOF_OUT_INIT_LOW,
-					   "sdhi1_power");
-		if (!ret)
-			power_gpio = 114;
-	}
-
-	/*
-	 * If requesting the GPIO above failed, it means, that the regulator got
-	 * probed and grabbed the GPIO, but we don't know, whether the sdhi
-	 * driver already uses the regulator. If it doesn't, we have to toggle
-	 * the GPIO ourselves, even though it is now owned by the fixed
-	 * regulator driver. We have to live with the race in case the driver
-	 * gets unloaded and the GPIO freed between these two steps.
-	 */
-	gpio_set_value(114, state);
-}
-
-static struct sh_mobile_sdhi_info sh_sdhi1_info = {
-	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
-	.tmio_caps	= MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
-	.tmio_ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
-	.set_pwr	= ag5evm_sdhi1_set_pwr,
-};
-
-static struct resource sdhi1_resources[] = {
-	[0] = {
-		.name	= "SDHI1",
-		.start	= 0xee120000,
-		.end	= 0xee1200ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.name	= SH_MOBILE_SDHI_IRQ_CARD_DETECT,
-		.start	= gic_spi(87),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDCARD,
-		.start	= gic_spi(88),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[3] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDIO,
-		.start	= gic_spi(89),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi1_device = {
-	.name		= "sh_mobile_sdhi",
-	.id		= 1,
-	.dev		= {
-		.platform_data	= &sh_sdhi1_info,
-	},
-	.num_resources	= ARRAY_SIZE(sdhi1_resources),
-	.resource	= sdhi1_resources,
-};
-
-static struct platform_device *ag5evm_devices[] __initdata = {
-	&cn4_power,
-	&eth_device,
-	&keysc_device,
-	&fsi_device,
-	&mmc_device,
-	&irda_device,
-	&mipidsi0_device,
-	&lcdc0_device,
-	&sdhi0_device,
-	&sdhi1_device,
-};
-
-static unsigned long pin_pullup_conf[] = {
-	PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0),
-};
-
-static const struct pinctrl_map ag5evm_pinctrl_map[] = {
-	/* FSIA */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0",
-				  "fsia_mclk_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0",
-				  "fsia_sclk_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0",
-				  "fsia_data_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2.0", "pfc-sh73a0",
-				  "fsia_data_out", "fsia"),
-	/* I2C2 & I2C3 */
-	PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.2", "pfc-sh73a0",
-				  "i2c2_0", "i2c2"),
-	PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.3", "pfc-sh73a0",
-				  "i2c3_0", "i2c3"),
-	/* IrDA */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_irda.0", "pfc-sh73a0",
-				  "irda_0", "irda"),
-	/* KEYSC */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_in8", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out04", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out5", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out6_0", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out7_0", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out8_0", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out9_2", "keysc"),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				      "keysc_in8", pin_pullup_conf),
-	/* MMCIF */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				  "mmc0_data8_0", "mmc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				  "mmc0_ctrl_0", "mmc0"),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				    "PORT279", pin_pullup_conf),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				      "mmc0_data8_0", pin_pullup_conf),
-	/* SCIFA2 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh73a0",
-				  "scifa2_data_0", "scifa2"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh73a0",
-				  "scifa2_ctrl_0", "scifa2"),
-	/* SDHI0 (CN15 [SD I/F]) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_data4", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_ctrl", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_wp", "sdhi0"),
-	/* SDHI1 (CN4 [WLAN I/F]) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				  "sdhi1_data4", "sdhi1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				  "sdhi1_ctrl", "sdhi1"),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				      "sdhi1_data4", pin_pullup_conf),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				    "PORT263", pin_pullup_conf),
-};
-
-static void __init ag5evm_init(void)
-{
-	regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
-				     ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
-	regulator_register_always_on(1, "fixed-2.8V", fixed2v8_power_consumers,
-				     ARRAY_SIZE(fixed2v8_power_consumers), 3300000);
-	regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
-	pinctrl_register_mappings(ag5evm_pinctrl_map,
-				  ARRAY_SIZE(ag5evm_pinctrl_map));
-	sh73a0_pinmux_init();
-
-	/* enable MMCIF */
-	gpio_request_one(208, GPIOF_OUT_INIT_HIGH, NULL); /* Reset */
-
-	/* enable SMSC911X */
-	gpio_request_one(144, GPIOF_IN, NULL); /* PINTA2 */
-	gpio_request_one(145, GPIOF_OUT_INIT_HIGH, NULL); /* RESET */
-
-	/* LCD panel */
-	gpio_request_one(217, GPIOF_OUT_INIT_LOW, NULL); /* RESET */
-	mdelay(1);
-	gpio_set_value(217, 1);
-	mdelay(100);
-
-
-#ifdef CONFIG_CACHE_L2X0
-	/* Shared attribute override enable, 64K*8way */
-	l2x0_init(IOMEM(0xf0100000), 0x00460000, 0xc2000fff);
-#endif
-	sh73a0_add_standard_devices();
-
-	i2c_register_board_info(1, &backlight_board_info, 1);
-
-	platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices));
-}
-
-MACHINE_START(AG5EVM, "ag5evm")
-	.smp		= smp_ops(sh73a0_smp_ops),
-	.map_io		= sh73a0_map_io,
-	.init_early	= sh73a0_add_early_devices,
-	.nr_irqs	= NR_IRQS_LEGACY,
-	.init_irq	= sh73a0_init_irq,
-	.init_machine	= ag5evm_init,
-	.init_late	= shmobile_init_late,
-	.init_time	= sh73a0_earlytimer_init,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ape6evm-reference.c b/arch/arm/mach-shmobile/board-ape6evm-reference.c
new file mode 100644
index 0000000..a23fa71
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-ape6evm-reference.c
@@ -0,0 +1,62 @@
+/*
+ * APE6EVM board support
+ *
+ * Copyright (C) 2013  Renesas Solutions Corp.
+ * Copyright (C) 2013  Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+#include <linux/sh_clk.h>
+#include <mach/common.h>
+#include <mach/r8a73a4.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static void __init ape6evm_add_standard_devices(void)
+{
+	struct clk *parent;
+	struct clk *mp;
+
+	r8a73a4_clock_init();
+
+	/* MP clock parent = extal2 */
+	parent      = clk_get(NULL, "extal2");
+	mp          = clk_get(NULL, "mp");
+	BUG_ON(IS_ERR(parent) || IS_ERR(mp));
+
+	clk_set_parent(mp, parent);
+	clk_put(parent);
+	clk_put(mp);
+
+	r8a73a4_add_dt_devices();
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0);
+}
+
+static const char *ape6evm_boards_compat_dt[] __initdata = {
+	"renesas,ape6evm-reference",
+	NULL,
+};
+
+DT_MACHINE_START(APE6EVM_DT, "ape6evm")
+	.init_early	= r8a73a4_init_delay,
+	.init_machine	= ape6evm_add_standard_devices,
+	.dt_compat	= ape6evm_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 38c6c73..24b87eea 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -241,7 +241,6 @@
 
 DT_MACHINE_START(APE6EVM_DT, "ape6evm")
 	.init_early	= r8a73a4_init_delay,
-	.init_time	= shmobile_timer_init,
 	.init_machine	= ape6evm_add_standard_devices,
 	.dt_compat	= ape6evm_boards_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
index fd2446d..57d1a78 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
@@ -190,7 +190,6 @@
 	.init_early	= r8a7740_init_delay,
 	.init_irq	= r8a7740_init_irq_of,
 	.init_machine	= eva_init,
-	.init_time	= shmobile_timer_init,
 	.init_late	= shmobile_init_late,
 	.dt_compat	= eva_boards_compat_dt,
 	.restart	= eva_restart,
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 6b4b77d..5bd1479 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1313,7 +1313,7 @@
 DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
 	.map_io		= r8a7740_map_io,
 	.init_early	= eva_add_early_devices,
-	.init_irq	= r8a7740_init_irq,
+	.init_irq	= r8a7740_init_irq_of,
 	.init_machine	= eva_init,
 	.init_late	= shmobile_init_late,
 	.init_time	= eva_earlytimer_init,
diff --git a/arch/arm/mach-shmobile/board-bockw-reference.c b/arch/arm/mach-shmobile/board-bockw-reference.c
new file mode 100644
index 0000000..1a7c893
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-bockw-reference.c
@@ -0,0 +1,61 @@
+/*
+ * Bock-W board support
+ *
+ * Copyright (C) 2013  Renesas Solutions Corp.
+ * Copyright (C) 2013  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/of_platform.h>
+#include <linux/pinctrl/machine.h>
+#include <mach/common.h>
+#include <mach/r8a7778.h>
+#include <asm/mach/arch.h>
+
+/*
+ *	see board-bockw.c for details of the DIP switch settings
+ */
+
+static const struct pinctrl_map bockw_pinctrl_map[] = {
+	/* SCIF0 */
+	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.0", "pfc-r8a7778",
+				  "scif0_data_a", "scif0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.0", "pfc-r8a7778",
+				  "scif0_ctrl", "scif0"),
+};
+
+static void __init bockw_init(void)
+{
+	r8a7778_clock_init();
+
+	pinctrl_register_mappings(bockw_pinctrl_map,
+				  ARRAY_SIZE(bockw_pinctrl_map));
+	r8a7778_pinmux_init();
+	r8a7778_add_dt_devices();
+
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static const char *bockw_boards_compat_dt[] __initdata = {
+	"renesas,bockw-reference",
+	NULL,
+};
+
+DT_MACHINE_START(BOCKW_DT, "bockw")
+	.init_early	= r8a7778_init_delay,
+	.init_irq	= r8a7778_init_irq_dt,
+	.init_machine	= bockw_init,
+	.dt_compat	= bockw_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 35dd7f2..6b9faf3 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -21,8 +21,11 @@
 
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
+#include <linux/mmc/sh_mmcif.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pinctrl/machine.h>
+#include <linux/platform_data/usb-rcar-phy.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
@@ -66,28 +69,38 @@
 	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
 };
 
-static struct smsc911x_platform_config smsc911x_data = {
+static struct smsc911x_platform_config smsc911x_data __initdata = {
 	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
 	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
 	.flags		= SMSC911X_USE_32BIT,
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 };
 
-static struct resource smsc911x_resources[] = {
+static struct resource smsc911x_resources[] __initdata = {
 	DEFINE_RES_MEM(0x18300000, 0x1000),
 	DEFINE_RES_IRQ(irq_pin(0)), /* IRQ 0 */
 };
 
 /* USB */
+static struct resource usb_phy_resources[] __initdata = {
+	DEFINE_RES_MEM(0xffe70800, 0x100),
+	DEFINE_RES_MEM(0xffe76000, 0x100),
+};
+
 static struct rcar_phy_platform_data usb_phy_platform_data __initdata;
 
 /* SDHI */
-static struct sh_mobile_sdhi_info sdhi0_info = {
+static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
 	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
 	.tmio_ocr_mask	= MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
 	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT,
 };
 
+static struct resource sdhi0_resources[] __initdata = {
+	DEFINE_RES_MEM(0xFFE4C000, 0x100),
+	DEFINE_RES_IRQ(gic_iid(0x77)),
+};
+
 static struct sh_eth_plat_data ether_platform_data __initdata = {
 	.phy		= 0x01,
 	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
@@ -136,7 +149,12 @@
 };
 
 /* MMC */
-static struct sh_mmcif_plat_data sh_mmcif_plat = {
+static struct resource mmc_resources[] __initdata = {
+	DEFINE_RES_MEM(0xffe4e000, 0x100),
+	DEFINE_RES_IRQ(gic_iid(0x5d)),
+};
+
+static struct sh_mmcif_plat_data sh_mmcif_plat __initdata = {
 	.sup_pclk	= 0,
 	.ocr		= MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
 	.caps		= MMC_CAP_4_BIT_DATA |
@@ -217,11 +235,7 @@
 	r8a7778_clock_init();
 	r8a7778_init_irq_extpin(1);
 	r8a7778_add_standard_devices();
-	r8a7778_add_usb_phy_device(&usb_phy_platform_data);
 	r8a7778_add_ether_device(&ether_platform_data);
-	r8a7778_add_i2c_device(0);
-	r8a7778_add_hspi_device(0);
-	r8a7778_add_mmc_device(&sh_mmcif_plat);
 	r8a7778_add_vin_device(0, &vin_platform_data);
 	/* VIN1 has a pin conflict with Ether */
 	if (!IS_ENABLED(CONFIG_SH_ETH))
@@ -241,6 +255,18 @@
 				  ARRAY_SIZE(bockw_pinctrl_map));
 	r8a7778_pinmux_init();
 
+	platform_device_register_resndata(
+		&platform_bus, "sh_mmcif", -1,
+		mmc_resources, ARRAY_SIZE(mmc_resources),
+		&sh_mmcif_plat, sizeof(struct sh_mmcif_plat_data));
+
+	platform_device_register_resndata(
+		&platform_bus, "rcar_usb_phy", -1,
+		usb_phy_resources,
+		ARRAY_SIZE(usb_phy_resources),
+		&usb_phy_platform_data,
+		sizeof(struct rcar_phy_platform_data));
+
 	/* for SMSC */
 	base = ioremap_nocache(FPGA, SZ_1M);
 	if (base) {
@@ -276,7 +303,10 @@
 		iowrite32(ioread32(base + PUPR4) | (3 << 26), base + PUPR4);
 		iounmap(base);
 
-		r8a7778_sdhi_init(0, &sdhi0_info);
+		platform_device_register_resndata(
+			&platform_bus, "sh_mobile_sdhi", 0,
+			sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
+			&sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
 	}
 }
 
@@ -289,7 +319,6 @@
 	.init_early	= r8a7778_init_delay,
 	.init_irq	= r8a7778_init_irq_dt,
 	.init_machine	= bockw_init,
-	.init_time	= shmobile_timer_init,
 	.dt_compat	= bockw_boards_compat_dt,
 	.init_late      = r8a7778_init_late,
 MACHINE_END
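
The Bock-W changes above drop the r8a7778_add_*_device() helpers in
favour of open-coded platform_device_register_resndata() calls, which
create and register a platform device from a resource table plus a
platform-data blob in one step. Both the resources and the data are
copied by the driver core, which is why the tables can now safely be
marked __initdata. A minimal sketch of the pattern -- the device name,
address and payload struct below are made up for illustration, not
taken from this patch:

	#include <linux/ioport.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>

	struct example_pdata {
		unsigned int flags;		/* hypothetical payload */
	};

	static struct resource example_resources[] __initdata = {
		DEFINE_RES_MEM(0xffe40000, 0x100),	/* hypothetical MMIO window */
	};

	static struct example_pdata example_pdata __initdata = {
		.flags	= 1,
	};

	static void __init example_board_init(void)
	{
		/* The core copies both the resources and the payload, so
		 * the __initdata tables may be discarded after init. */
		platform_device_register_resndata(&platform_bus, "example-dev",
						  -1, example_resources,
						  ARRAY_SIZE(example_resources),
						  &example_pdata,
						  sizeof(example_pdata));
	}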
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
deleted file mode 100644
index 6af20d9..0000000
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * kota2 board support
- *
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Magnus Damm
- * Copyright (C) 2010  Takashi Yoshii <yoshii.takashi.zj@renesas.com>
- * Copyright (C) 2009  Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/platform_data/pwm-renesas-tpu.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/smsc911x.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/input/sh_keysc.h>
-#include <linux/gpio_keys.h>
-#include <linux/leds.h>
-#include <linux/leds_pwm.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-#include <mach/sh73a0.h>
-#include <mach/common.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/traps.h>
-
-/* Dummy supplies, where voltage doesn't matter */
-static struct regulator_consumer_supply dummy_supplies[] = {
-	REGULATOR_SUPPLY("vddvario", "smsc911x"),
-	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
-};
-
-/* SMSC 9220 */
-static struct resource smsc9220_resources[] = {
-	[0] = {
-		.start		= 0x14000000, /* CS5A */
-		.end		= 0x140000ff, /* A1->A7 */
-		.flags		= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start		= SH73A0_PINT0_IRQ(2), /* PINTA2 */
-		.flags		= IORESOURCE_IRQ,
-	},
-};
-
-static struct smsc911x_platform_config smsc9220_platdata = {
-	.flags		= SMSC911X_USE_32BIT, /* 32-bit SW on 16-bit HW bus */
-	.phy_interface	= PHY_INTERFACE_MODE_MII,
-	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
-};
-
-static struct platform_device eth_device = {
-	.name		= "smsc911x",
-	.id		= 0,
-	.dev  = {
-		.platform_data = &smsc9220_platdata,
-	},
-	.resource	= smsc9220_resources,
-	.num_resources	= ARRAY_SIZE(smsc9220_resources),
-};
-
-/* KEYSC */
-static struct sh_keysc_info keysc_platdata = {
-	.mode		= SH_KEYSC_MODE_6,
-	.scan_timing	= 3,
-	.delay		= 100,
-	.keycodes	= {
-		KEY_NUMERIC_STAR, KEY_NUMERIC_0, KEY_NUMERIC_POUND,
-		0, 0, 0, 0, 0,
-		KEY_NUMERIC_7, KEY_NUMERIC_8, KEY_NUMERIC_9,
-		0, KEY_DOWN, 0, 0, 0,
-		KEY_NUMERIC_4, KEY_NUMERIC_5, KEY_NUMERIC_6,
-		KEY_LEFT, KEY_ENTER, KEY_RIGHT, 0, 0,
-		KEY_NUMERIC_1, KEY_NUMERIC_2, KEY_NUMERIC_3,
-		0, KEY_UP, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-	},
-};
-
-static struct resource keysc_resources[] = {
-	[0] = {
-		.name	= "KEYSC",
-		.start	= 0xe61b0000,
-		.end	= 0xe61b0098 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(71),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device keysc_device = {
-	.name		= "sh_keysc",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(keysc_resources),
-	.resource	= keysc_resources,
-	.dev		= {
-		.platform_data	= &keysc_platdata,
-	},
-};
-
-/* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
-
-static struct gpio_keys_button gpio_buttons[] = {
-	GPIO_KEY(KEY_VOLUMEUP, 56, "+"), /* S2: VOL+ [IRQ9] */
-	GPIO_KEY(KEY_VOLUMEDOWN, 54, "-"), /* S3: VOL- [IRQ10] */
-	GPIO_KEY(KEY_MENU, 27, "Menu"), /* S4: MENU [IRQ30] */
-	GPIO_KEY(KEY_HOMEPAGE, 26, "Home"), /* S5: HOME [IRQ31] */
-	GPIO_KEY(KEY_BACK, 11, "Back"), /* S6: BACK [IRQ0] */
-	GPIO_KEY(KEY_PHONE, 238, "Tel"), /* S7: TEL [IRQ11] */
-	GPIO_KEY(KEY_POWER, 239, "C1"), /* S8: CAM [IRQ13] */
-	GPIO_KEY(KEY_MAIL, 224, "Mail"), /* S9: MAIL [IRQ3] */
-	/* Omitted button "C3?": 223 - S10: CUST [IRQ8] */
-	GPIO_KEY(KEY_CAMERA, 164, "C2"), /* S11: CAM_HALF [IRQ25] */
-	/* Omitted button "?": 152 - S12: CAM_FULL [No IRQ] */
-};
-
-static struct gpio_keys_platform_data gpio_key_info = {
-	.buttons        = gpio_buttons,
-	.nbuttons       = ARRAY_SIZE(gpio_buttons),
-};
-
-static struct platform_device gpio_keys_device = {
-	.name   = "gpio-keys",
-	.id     = -1,
-	.dev    = {
-		.platform_data  = &gpio_key_info,
-	},
-};
-
-/* GPIO LED */
-#define GPIO_LED(n, g) { .name = n, .gpio = g }
-
-static struct gpio_led gpio_leds[] = {
-	GPIO_LED("G", 20), /* PORT20 [GPO0] -> LED7 -> "G" */
-	GPIO_LED("H", 21), /* PORT21 [GPO1] -> LED8 -> "H" */
-	GPIO_LED("J", 22), /* PORT22 [GPO2] -> LED9 -> "J" */
-};
-
-static struct gpio_led_platform_data gpio_leds_info = {
-	.leds		= gpio_leds,
-	.num_leds	= ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device gpio_leds_device = {
-	.name   = "leds-gpio",
-	.id     = -1,
-	.dev    = {
-		.platform_data  = &gpio_leds_info,
-	},
-};
-
-/* TPU LED */
-static struct resource tpu1_pwm_resources[] = {
-	[0] = {
-		.start	= 0xe6610000,
-		.end	= 0xe66100ff,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device tpu1_pwm_device = {
-	.name = "renesas-tpu-pwm",
-	.id = 1,
-	.num_resources	= ARRAY_SIZE(tpu1_pwm_resources),
-	.resource	= tpu1_pwm_resources,
-};
-
-static struct resource tpu2_pwm_resources[] = {
-	[0] = {
-		.start	= 0xe6620000,
-		.end	= 0xe66200ff,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device tpu2_pwm_device = {
-	.name = "renesas-tpu-pwm",
-	.id = 2,
-	.num_resources	= ARRAY_SIZE(tpu2_pwm_resources),
-	.resource	= tpu2_pwm_resources,
-};
-
-static struct resource tpu3_pwm_resources[] = {
-	[0] = {
-		.start	= 0xe6630000,
-		.end	= 0xe66300ff,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device tpu3_pwm_device = {
-	.name = "renesas-tpu-pwm",
-	.id = 3,
-	.num_resources	= ARRAY_SIZE(tpu3_pwm_resources),
-	.resource	= tpu3_pwm_resources,
-};
-
-static struct resource tpu4_pwm_resources[] = {
-	[0] = {
-		.start	= 0xe6640000,
-		.end	= 0xe66400ff,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device tpu4_pwm_device = {
-	.name = "renesas-tpu-pwm",
-	.id = 4,
-	.num_resources	= ARRAY_SIZE(tpu4_pwm_resources),
-	.resource	= tpu4_pwm_resources,
-};
-
-static struct pwm_lookup pwm_lookup[] = {
-	PWM_LOOKUP("renesas-tpu-pwm.1", 2, "leds-pwm.0", "V2513"),
-	PWM_LOOKUP("renesas-tpu-pwm.2", 1, "leds-pwm.0", "V2515"),
-	PWM_LOOKUP("renesas-tpu-pwm.3", 0, "leds-pwm.0", "KEYLED"),
-	PWM_LOOKUP("renesas-tpu-pwm.4", 1, "leds-pwm.0", "V2514"),
-};
-
-static struct led_pwm tpu_pwm_leds[] = {
-	{
-		.name		= "V2513",
-		.max_brightness	= 1000,
-	}, {
-		.name		= "V2515",
-		.max_brightness	= 1000,
-	}, {
-		.name		= "KEYLED",
-		.max_brightness	= 1000,
-	}, {
-		.name		= "V2514",
-		.max_brightness	= 1000,
-	},
-};
-
-static struct led_pwm_platform_data leds_pwm_pdata = {
-	.num_leds = ARRAY_SIZE(tpu_pwm_leds),
-	.leds = tpu_pwm_leds,
-};
-
-static struct platform_device leds_pwm_device = {
-	.name = "leds-pwm",
-	.id = 0,
-	.dev = {
-		.platform_data = &leds_pwm_pdata,
-	},
-};
-
-/* Fixed 1.8V regulator to be used by MMCIF */
-static struct regulator_consumer_supply fixed1v8_power_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
-};
-
-/* MMCIF */
-static struct resource mmcif_resources[] = {
-	[0] = {
-		.name   = "MMCIF",
-		.start  = 0xe6bd0000,
-		.end    = 0xe6bd00ff,
-		.flags  = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(140),
-		.flags  = IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start  = gic_spi(141),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct sh_mmcif_plat_data mmcif_info = {
-	.ocr            = MMC_VDD_165_195,
-	.caps           = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
-};
-
-static struct platform_device mmcif_device = {
-	.name           = "sh_mmcif",
-	.id             = 0,
-	.dev            = {
-		.platform_data          = &mmcif_info,
-	},
-	.num_resources  = ARRAY_SIZE(mmcif_resources),
-	.resource       = mmcif_resources,
-};
-
-/* Fixed 3.3V regulator to be used by SDHI0 and SDHI1 */
-static struct regulator_consumer_supply fixed3v3_power_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
-};
-
-/* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi0_info = {
-	.tmio_caps      = MMC_CAP_SD_HIGHSPEED,
-	.tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
-};
-
-static struct resource sdhi0_resources[] = {
-	[0] = {
-		.name   = "SDHI0",
-		.start  = 0xee100000,
-		.end    = 0xee1000ff,
-		.flags  = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(83),
-		.flags  = IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start  = gic_spi(84),
-		.flags  = IORESOURCE_IRQ,
-	},
-	[3] = {
-		.start	= gic_spi(85),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi0_device = {
-	.name           = "sh_mobile_sdhi",
-	.id             = 0,
-	.num_resources  = ARRAY_SIZE(sdhi0_resources),
-	.resource       = sdhi0_resources,
-	.dev    = {
-		.platform_data  = &sdhi0_info,
-	},
-};
-
-/* SDHI1 */
-static struct sh_mobile_sdhi_info sdhi1_info = {
-	.tmio_caps      = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
-	.tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
-};
-
-static struct resource sdhi1_resources[] = {
-	[0] = {
-		.name   = "SDHI1",
-		.start  = 0xee120000,
-		.end    = 0xee1200ff,
-		.flags  = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(87),
-		.flags  = IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start  = gic_spi(88),
-		.flags  = IORESOURCE_IRQ,
-	},
-	[3] = {
-		.start	= gic_spi(89),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi1_device = {
-	.name           = "sh_mobile_sdhi",
-	.id             = 1,
-	.num_resources  = ARRAY_SIZE(sdhi1_resources),
-	.resource       = sdhi1_resources,
-	.dev    = {
-		.platform_data  = &sdhi1_info,
-	},
-};
-
-static struct platform_device *kota2_devices[] __initdata = {
-	&eth_device,
-	&keysc_device,
-	&gpio_keys_device,
-	&gpio_leds_device,
-	&tpu1_pwm_device,
-	&tpu2_pwm_device,
-	&tpu3_pwm_device,
-	&tpu4_pwm_device,
-	&leds_pwm_device,
-	&mmcif_device,
-	&sdhi0_device,
-	&sdhi1_device,
-};
-
-static unsigned long pin_pullup_conf[] = {
-	PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0),
-};
-
-static const struct pinctrl_map kota2_pinctrl_map[] = {
-	/* KEYSC */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_in8", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out04", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out5", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out6_0", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out7_0", "keysc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				  "keysc_out8_0", "keysc"),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_keysc.0", "pfc-sh73a0",
-				      "keysc_in8", pin_pullup_conf),
-	/* MMCIF */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				  "mmc0_data8_0", "mmc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				  "mmc0_ctrl_0", "mmc0"),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				    "PORT279", pin_pullup_conf),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				      "mmc0_data8_0", pin_pullup_conf),
-	/* SCIFA2 (UART2) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh73a0",
-				  "scifa2_data_0", "scifa2"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh73a0",
-				  "scifa2_ctrl_0", "scifa2"),
-	/* SCIFA4 (UART1) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
-				  "scifa4_data", "scifa4"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
-				  "scifa4_ctrl", "scifa4"),
-	/* SCIFB (BT) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.8", "pfc-sh73a0",
-				  "scifb_data_0", "scifb"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.8", "pfc-sh73a0",
-				  "scifb_clk_0", "scifb"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.8", "pfc-sh73a0",
-				  "scifb_ctrl_0", "scifb"),
-	/* SDHI0 (microSD) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_data4", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_ctrl", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_cd", "sdhi0"),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				      "sdhi0_data4", pin_pullup_conf),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				    "PORT256", pin_pullup_conf),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				    "PORT251", pin_pullup_conf),
-	/* SDHI1 (BCM4330) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				  "sdhi1_data4", "sdhi1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				  "sdhi1_ctrl", "sdhi1"),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				      "sdhi1_data4", pin_pullup_conf),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mobile_sdhi.1", "pfc-sh73a0",
-				    "PORT263", pin_pullup_conf),
-	/* SMSC911X */
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0",
-				  "bsc_data_0_7", "bsc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0",
-				  "bsc_data_8_15", "bsc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0",
-				  "bsc_cs5_a", "bsc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0",
-				  "bsc_we0", "bsc"),
-	/* TPU */
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm.1", "pfc-sh73a0",
-				  "tpu1_to2", "tpu1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm.2", "pfc-sh73a0",
-				  "tpu2_to1", "tpu2"),
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm.3", "pfc-sh73a0",
-				  "tpu3_to0", "tpu3"),
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm.4", "pfc-sh73a0",
-				  "tpu4_to1", "tpu4"),
-};
-
-static void __init kota2_init(void)
-{
-	regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
-				     ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
-	regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers,
-				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
-	regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
-	pinctrl_register_mappings(kota2_pinctrl_map,
-				  ARRAY_SIZE(kota2_pinctrl_map));
-	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
-
-	sh73a0_pinmux_init();
-
-	/* SMSC911X */
-	gpio_request_one(144, GPIOF_IN, NULL); /* PINTA2 */
-	gpio_request_one(145, GPIOF_OUT_INIT_HIGH, NULL); /* RESET */
-
-	/* MMCIF */
-	gpio_request_one(208, GPIOF_OUT_INIT_HIGH, NULL); /* Reset */
-
-#ifdef CONFIG_CACHE_L2X0
-	/* Early BRESP enable, Shared attribute override enable, 64K*8way */
-	l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
-#endif
-	sh73a0_add_standard_devices();
-	platform_add_devices(kota2_devices, ARRAY_SIZE(kota2_devices));
-}
-
-MACHINE_START(KOTA2, "kota2")
-	.smp		= smp_ops(sh73a0_smp_ops),
-	.map_io		= sh73a0_map_io,
-	.init_early	= sh73a0_add_early_devices,
-	.nr_irqs	= NR_IRQS_LEGACY,
-	.init_irq	= sh73a0_init_irq,
-	.init_machine	= kota2_init,
-	.init_late	= shmobile_init_late,
-	.init_time	= sh73a0_earlytimer_init,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9g-reference.c b/arch/arm/mach-shmobile/board-kzm9g-reference.c
index a66a808..598e324 100644
--- a/arch/arm/mach-shmobile/board-kzm9g-reference.c
+++ b/arch/arm/mach-shmobile/board-kzm9g-reference.c
@@ -52,6 +52,5 @@
 	.init_early	= sh73a0_init_delay,
 	.nr_irqs	= NR_IRQS_LEGACY,
 	.init_machine	= kzm_init,
-	.init_time	= shmobile_timer_init,
 	.dt_compat	= kzm9g_boards_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 1068120..f199496 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -54,14 +54,14 @@
 /*
  * external GPIO
  */
-#define GPIO_PCF8575_BASE	(GPIO_NR)
-#define GPIO_PCF8575_PORT10	(GPIO_NR + 8)
-#define GPIO_PCF8575_PORT11	(GPIO_NR + 9)
-#define GPIO_PCF8575_PORT12	(GPIO_NR + 10)
-#define GPIO_PCF8575_PORT13	(GPIO_NR + 11)
-#define GPIO_PCF8575_PORT14	(GPIO_NR + 12)
-#define GPIO_PCF8575_PORT15	(GPIO_NR + 13)
-#define GPIO_PCF8575_PORT16	(GPIO_NR + 14)
+#define GPIO_PCF8575_BASE	(310)
+#define GPIO_PCF8575_PORT10	(GPIO_PCF8575_BASE + 8)
+#define GPIO_PCF8575_PORT11	(GPIO_PCF8575_BASE + 9)
+#define GPIO_PCF8575_PORT12	(GPIO_PCF8575_BASE + 10)
+#define GPIO_PCF8575_PORT13	(GPIO_PCF8575_BASE + 11)
+#define GPIO_PCF8575_PORT14	(GPIO_PCF8575_BASE + 12)
+#define GPIO_PCF8575_PORT15	(GPIO_PCF8575_BASE + 13)
+#define GPIO_PCF8575_PORT16	(GPIO_PCF8575_BASE + 14)
 
 /* Dummy supplies, where voltage doesn't matter */
 static struct regulator_consumer_supply dummy_supplies[] = {
diff --git a/arch/arm/mach-shmobile/board-lager-reference.c b/arch/arm/mach-shmobile/board-lager-reference.c
new file mode 100644
index 0000000..9c316a1
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-lager-reference.c
@@ -0,0 +1,45 @@
+/*
+ * Lager board support - Reference DT implementation
+ *
+ * Copyright (C) 2013  Renesas Solutions Corp.
+ * Copyright (C) 2013  Simon Horman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <mach/r8a7790.h>
+#include <asm/mach/arch.h>
+
+static void __init lager_add_standard_devices(void)
+{
+	/* Clocks are set up late during boot in the DT case */
+	r8a7790_clock_init();
+
+	r8a7790_add_dt_devices();
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static const char *lager_boards_compat_dt[] __initdata = {
+	"renesas,lager-reference",
+	NULL,
+};
+
+DT_MACHINE_START(LAGER_DT, "lager")
+	.init_early	= r8a7790_init_delay,
+	.init_machine	= lager_add_standard_devices,
+	.init_time	= r8a7790_timer_init,
+	.dt_compat	= lager_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen-reference.c b/arch/arm/mach-shmobile/board-marzen-reference.c
index 3d1c439..3f4250a 100644
--- a/arch/arm/mach-shmobile/board-marzen-reference.c
+++ b/arch/arm/mach-shmobile/board-marzen-reference.c
@@ -42,6 +42,5 @@
 	.nr_irqs	= NR_IRQS_LEGACY,
 	.init_irq	= r8a7779_init_irq_dt,
 	.init_machine	= marzen_init,
-	.init_time	= shmobile_timer_init,
 	.dt_compat	= marzen_boards_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index ca7fb2e..3f5044f 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -30,6 +30,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/gpio-rcar.h>
+#include <linux/platform_data/usb-rcar-phy.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 #include <linux/smsc911x.h>
@@ -39,7 +40,6 @@
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <media/soc_camera.h>
-#include <mach/hardware.h>
 #include <mach/r8a7779.h>
 #include <mach/common.h>
 #include <mach/irqs.h>
@@ -59,7 +59,26 @@
 	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
 };
 
-static struct rcar_phy_platform_data usb_phy_platform_data __initdata;
+/* USB PHY */
+static struct resource usb_phy_resources[] = {
+	[0] = {
+		.start		= 0xffe70800,
+		.end		= 0xffe70900 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct rcar_phy_platform_data usb_phy_platform_data;
+
+static struct platform_device usb_phy = {
+	.name		= "rcar_usb_phy",
+	.id		= -1,
+	.dev  = {
+		.platform_data = &usb_phy_platform_data,
+	},
+	.resource	= usb_phy_resources,
+	.num_resources	= ARRAY_SIZE(usb_phy_resources),
+};
 
 /* SMSC LAN89218 */
 static struct resource smsc911x_resources[] = {
@@ -212,6 +231,7 @@
 	&thermal_device,
 	&hspi_device,
 	&leds_device,
+	&usb_phy,
 	&camera0_device,
 	&camera1_device,
 };
@@ -274,19 +294,23 @@
 	r8a7779_init_irq_extpin(1); /* IRQ1 as individual interrupt */
 
 	r8a7779_add_standard_devices();
-	r8a7779_add_usb_phy_device(&usb_phy_platform_data);
 	r8a7779_add_vin_device(1, &vin_platform_data);
 	r8a7779_add_vin_device(3, &vin_platform_data);
 	platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
 }
 
-MACHINE_START(MARZEN, "marzen")
+static const char *marzen_boards_compat_dt[] __initdata = {
+        "renesas,marzen",
+        NULL,
+};
+
+DT_MACHINE_START(MARZEN, "marzen")
 	.smp		= smp_ops(r8a7779_smp_ops),
 	.map_io		= r8a7779_map_io,
 	.init_early	= r8a7779_add_early_devices,
-	.nr_irqs	= NR_IRQS_LEGACY,
-	.init_irq	= r8a7779_init_irq,
+	.init_irq	= r8a7779_init_irq_dt,
 	.init_machine	= marzen_init,
 	.init_late	= r8a7779_init_late,
+	.dt_compat	= marzen_boards_compat_dt,
 	.init_time	= r8a7779_earlytimer_init,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 2667db8..f93751c 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -40,3 +40,52 @@
 	.globl	shmobile_boot_arg
 shmobile_boot_arg:
 2:	.space	4
+
+/*
+ * Per-CPU SMP boot function/argument selection code based on MPIDR
+ */
+
+ENTRY(shmobile_smp_boot)
+						@ r0 = MPIDR_HWID_BITMASK
+	mrc	p15, 0, r1, c0, c0, 5		@ r1 = MPIDR
+	and	r0, r1, r0			@ r0 = cpu_logical_map() value
+	mov	r1, #0				@ r1 = CPU index
+	adr	r5, 1f				@ array of per-cpu mpidr values
+	adr	r6, 2f				@ array of per-cpu functions
+	adr	r7, 3f				@ array of per-cpu arguments
+
+shmobile_smp_boot_find_mpidr:
+	ldr	r8, [r5, r1, lsl #2]
+	cmp	r8, r0
+	bne	shmobile_smp_boot_next
+
+	ldr	r9, [r6, r1, lsl #2]
+	cmp	r9, #0
+	bne	shmobile_smp_boot_found
+
+shmobile_smp_boot_next:
+	add	r1, r1, #1
+	cmp	r1, #CONFIG_NR_CPUS
+	blo	shmobile_smp_boot_find_mpidr
+
+	b	shmobile_smp_sleep
+
+shmobile_smp_boot_found:
+	ldr	r0, [r7, r1, lsl #2]
+	mov	pc, r9
+ENDPROC(shmobile_smp_boot)
+
+ENTRY(shmobile_smp_sleep)
+	wfi
+	b	shmobile_smp_boot
+ENDPROC(shmobile_smp_sleep)
+
+	.globl	shmobile_smp_mpidr
+shmobile_smp_mpidr:
+1:	.space	CONFIG_NR_CPUS * 4
+	.globl	shmobile_smp_fn
+shmobile_smp_fn:
+2:	.space	CONFIG_NR_CPUS * 4
+	.globl	shmobile_smp_arg
+shmobile_smp_arg:
+3:	.space	CONFIG_NR_CPUS * 4
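
For readers less fluent in ARM assembly, the per-CPU selection loop added above can be sketched in C roughly as follows. This is an illustration only, not part of the patch: read_mpidr() and wfi() are hypothetical stand-ins for the MRC and WFI instructions, and the real code tail-jumps ("mov pc, r9") to the found function instead of calling it.

	/* Illustration only: rough C equivalent of shmobile_smp_boot above. */
	extern unsigned long shmobile_smp_mpidr[];	/* label 1: MPIDR per CPU */
	extern unsigned long shmobile_smp_fn[];		/* label 2: function per CPU */
	extern unsigned long shmobile_smp_arg[];	/* label 3: argument per CPU */

	static void shmobile_smp_boot_sketch(unsigned long mpidr_hwid_bitmask)
	{
		/* r0/r1 in the assembly */
		unsigned long hwid = read_mpidr() & mpidr_hwid_bitmask;
		unsigned int cpu;

		for (;;) {
			/* scan the tables for our MPIDR with a registered fn */
			for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
				if (shmobile_smp_mpidr[cpu] == hwid &&
				    shmobile_smp_fn[cpu] != 0)
					/* assembly jumps here and never returns */
					((void (*)(unsigned long))shmobile_smp_fn[cpu])
						(shmobile_smp_arg[cpu]);
			}
			wfi();	/* shmobile_smp_sleep: wait for event, rescan */
		}
	}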
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index e818f02..7b93868 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -2,7 +2,6 @@
 #define __ARCH_MACH_COMMON_H
 
 extern void shmobile_earlytimer_init(void);
-extern void shmobile_timer_init(void);
 extern void shmobile_setup_delay(unsigned int max_cpu_core_mhz,
 			 unsigned int mult, unsigned int div);
 struct twd_local_timer;
@@ -10,7 +9,16 @@
 extern void shmobile_boot_vector(void);
 extern unsigned long shmobile_boot_fn;
 extern unsigned long shmobile_boot_arg;
+extern void shmobile_smp_boot(void);
+extern void shmobile_smp_sleep(void);
+extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
+			      unsigned long arg);
 extern void shmobile_boot_scu(void);
+extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
+extern int shmobile_smp_scu_boot_secondary(unsigned int cpu,
+					   struct task_struct *idle);
+extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
+extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
 struct clk;
 extern int shmobile_clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
diff --git a/arch/arm/mach-shmobile/include/mach/hardware.h b/arch/arm/mach-shmobile/include/mach/hardware.h
deleted file mode 100644
index 99264a5..0000000
--- a/arch/arm/mach-shmobile/include/mach/hardware.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASM_MACH_HARDWARE_H
-#define __ASM_MACH_HARDWARE_H
-
-#endif /* __ASM_MACH_HARDWARE_H */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a73a4.h b/arch/arm/mach-shmobile/include/mach/r8a73a4.h
index 144a85e..f3a9b70 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a73a4.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a73a4.h
@@ -2,6 +2,7 @@
 #define __ASM_R8A73A4_H__
 
 void r8a73a4_add_standard_devices(void);
+void r8a73a4_add_dt_devices(void);
 void r8a73a4_clock_init(void);
 void r8a73a4_pinmux_init(void);
 void r8a73a4_init_delay(void);
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7740.h b/arch/arm/mach-shmobile/include/mach/r8a7740.h
index 56f3750..d07932f 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7740.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7740.h
@@ -48,7 +48,6 @@
 
 extern void r8a7740_meram_workaround(void);
 extern void r8a7740_init_delay(void);
-extern void r8a7740_init_irq(void);
 extern void r8a7740_init_irq_of(void);
 extern void r8a7740_map_io(void);
 extern void r8a7740_add_early_devices(void);
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7778.h b/arch/arm/mach-shmobile/include/mach/r8a7778.h
index 2866704..adfcf51 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7778.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7778.h
@@ -18,21 +18,15 @@
 #ifndef __ASM_R8A7778_H__
 #define __ASM_R8A7778_H__
 
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/sh_eth.h>
-#include <linux/platform_data/usb-rcar-phy.h>
 #include <linux/platform_data/camera-rcar.h>
 
 extern void r8a7778_add_standard_devices(void);
 extern void r8a7778_add_standard_devices_dt(void);
 extern void r8a7778_add_ether_device(struct sh_eth_plat_data *pdata);
-extern void r8a7778_add_usb_phy_device(struct rcar_phy_platform_data *pdata);
-extern void r8a7778_add_i2c_device(int id);
-extern void r8a7778_add_hspi_device(int id);
-extern void r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info);
 extern void r8a7778_add_vin_device(int id,
 				   struct rcar_vin_platform_data *pdata);
+extern void r8a7778_add_dt_devices(void);
 
 extern void r8a7778_init_late(void);
 extern void r8a7778_init_delay(void);
@@ -40,6 +34,5 @@
 extern void r8a7778_clock_init(void);
 extern void r8a7778_init_irq_extpin(int irlm);
 extern void r8a7778_pinmux_init(void);
-extern void r8a7778_sdhi_init(int id, struct sh_mobile_sdhi_info *info);
 
 #endif /* __ASM_R8A7778_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h
index 6d2b641..11c7400 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7779.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h
@@ -4,7 +4,6 @@
 #include <linux/sh_clk.h>
 #include <linux/pm_domain.h>
 #include <linux/sh_eth.h>
-#include <linux/platform_data/usb-rcar-phy.h>
 #include <linux/platform_data/camera-rcar.h>
 
 struct platform_device;
@@ -26,7 +25,6 @@
 }
 
 extern void r8a7779_init_delay(void);
-extern void r8a7779_init_irq(void);
 extern void r8a7779_init_irq_extpin(int irlm);
 extern void r8a7779_init_irq_dt(void);
 extern void r8a7779_map_io(void);
@@ -35,7 +33,6 @@
 extern void r8a7779_add_standard_devices(void);
 extern void r8a7779_add_standard_devices_dt(void);
 extern void r8a7779_add_ether_device(struct sh_eth_plat_data *pdata);
-extern void r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata);
 extern void r8a7779_add_vin_device(int idx,
 				   struct rcar_vin_platform_data *pdata);
 extern void r8a7779_init_late(void);
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7790.h b/arch/arm/mach-shmobile/include/mach/r8a7790.h
index 7aaef40..788d559 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7790.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7790.h
@@ -2,6 +2,7 @@
 #define __ASM_R8A7790_H__
 
 void r8a7790_add_standard_devices(void);
+void r8a7790_add_dt_devices(void);
 void r8a7790_clock_init(void);
 void r8a7790_pinmux_init(void);
 void r8a7790_init_delay(void);
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
index 680dc5f..359b582 100644
--- a/arch/arm/mach-shmobile/include/mach/sh73a0.h
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_SH73A0_H__
 #define __ASM_SH73A0_H__
 
-#define GPIO_NR			310
-
 /* DMA slave IDs */
 enum {
 	SHDMA_SLAVE_INVALID,
diff --git a/arch/arm/mach-shmobile/intc-r8a7740.c b/arch/arm/mach-shmobile/intc-r8a7740.c
deleted file mode 100644
index 8871f77..0000000
--- a/arch/arm/mach-shmobile/intc-r8a7740.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * R8A7740 processor support
- *
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/arm-gic.h>
-
-static void __init r8a7740_init_irq_common(void)
-{
-	void __iomem *intc_prio_base = ioremap_nocache(0xe6900010, 0x10);
-	void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
-	void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
-
-	/* route signals to GIC */
-	iowrite32(0x0, pfc_inta_ctrl);
-
-	/*
-	 * To mask the shared interrupt to SPI 149 we must ensure to set
-	 * PRIO *and* MASK. Else we run into IRQ floods when registering
-	 * the intc_irqpin devices
-	 */
-	iowrite32(0x0, intc_prio_base + 0x0);
-	iowrite32(0x0, intc_prio_base + 0x4);
-	iowrite32(0x0, intc_prio_base + 0x8);
-	iowrite32(0x0, intc_prio_base + 0xc);
-	iowrite8(0xff, intc_msk_base + 0x0);
-	iowrite8(0xff, intc_msk_base + 0x4);
-	iowrite8(0xff, intc_msk_base + 0x8);
-	iowrite8(0xff, intc_msk_base + 0xc);
-
-	iounmap(intc_prio_base);
-	iounmap(intc_msk_base);
-	iounmap(pfc_inta_ctrl);
-}
-
-void __init r8a7740_init_irq_of(void)
-{
-	irqchip_init();
-	r8a7740_init_irq_common();
-}
-
-void __init r8a7740_init_irq(void)
-{
-	void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
-	void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
-
-	/* initialize the Generic Interrupt Controller PL390 r0p0 */
-	gic_init(0, 29, gic_dist_base, gic_cpu_base);
-	r8a7740_init_irq_common();
-}
diff --git a/arch/arm/mach-shmobile/intc-r8a7779.c b/arch/arm/mach-shmobile/intc-r8a7779.c
deleted file mode 100644
index b86dc89..0000000
--- a/arch/arm/mach-shmobile/intc-r8a7779.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * r8a7779 processor support - INTC hardware block
- *
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/irq-renesas-intc-irqpin.h>
-#include <linux/irqchip.h>
-#include <mach/common.h>
-#include <mach/intc.h>
-#include <mach/irqs.h>
-#include <mach/r8a7779.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#define INT2SMSKCR0 IOMEM(0xfe7822a0)
-#define INT2SMSKCR1 IOMEM(0xfe7822a4)
-#define INT2SMSKCR2 IOMEM(0xfe7822a8)
-#define INT2SMSKCR3 IOMEM(0xfe7822ac)
-#define INT2SMSKCR4 IOMEM(0xfe7822b0)
-
-#define INT2NTSR0 IOMEM(0xfe700060)
-#define INT2NTSR1 IOMEM(0xfe700064)
-
-static struct renesas_intc_irqpin_config irqpin0_platform_data = {
-	.irq_base = irq_pin(0), /* IRQ0 -> IRQ3 */
-	.sense_bitfield_width = 2,
-};
-
-static struct resource irqpin0_resources[] = {
-	DEFINE_RES_MEM(0xfe78001c, 4), /* ICR1 */
-	DEFINE_RES_MEM(0xfe780010, 4), /* INTPRI */
-	DEFINE_RES_MEM(0xfe780024, 4), /* INTREQ */
-	DEFINE_RES_MEM(0xfe780044, 4), /* INTMSK0 */
-	DEFINE_RES_MEM(0xfe780064, 4), /* INTMSKCLR0 */
-	DEFINE_RES_IRQ(gic_spi(27)), /* IRQ0 */
-	DEFINE_RES_IRQ(gic_spi(28)), /* IRQ1 */
-	DEFINE_RES_IRQ(gic_spi(29)), /* IRQ2 */
-	DEFINE_RES_IRQ(gic_spi(30)), /* IRQ3 */
-};
-
-static struct platform_device irqpin0_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 0,
-	.resource	= irqpin0_resources,
-	.num_resources	= ARRAY_SIZE(irqpin0_resources),
-	.dev		= {
-		.platform_data	= &irqpin0_platform_data,
-	},
-};
-
-void __init r8a7779_init_irq_extpin(int irlm)
-{
-	void __iomem *icr0 = ioremap_nocache(0xfe780000, PAGE_SIZE);
-	unsigned long tmp;
-
-	if (icr0) {
-		tmp = ioread32(icr0);
-		if (irlm)
-			tmp |= 1 << 23; /* IRQ0 -> IRQ3 as individual pins */
-		else
-			tmp &= ~(1 << 23); /* IRL mode - not supported */
-		tmp |= (1 << 21); /* LVLMODE = 1 */
-		iowrite32(tmp, icr0);
-		iounmap(icr0);
-
-		if (irlm)
-			platform_device_register(&irqpin0_device);
-	} else
-		pr_warn("r8a7779: unable to setup external irq pin mode\n");
-}
-
-static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
-{
-	return 0; /* always allow wakeup */
-}
-
-static void __init r8a7779_init_irq_common(void)
-{
-	gic_arch_extn.irq_set_wake = r8a7779_set_wake;
-
-	/* route all interrupts to ARM */
-	__raw_writel(0xffffffff, INT2NTSR0);
-	__raw_writel(0x3fffffff, INT2NTSR1);
-
-	/* unmask all known interrupts in INTCS2 */
-	__raw_writel(0xfffffff0, INT2SMSKCR0);
-	__raw_writel(0xfff7ffff, INT2SMSKCR1);
-	__raw_writel(0xfffbffdf, INT2SMSKCR2);
-	__raw_writel(0xbffffffc, INT2SMSKCR3);
-	__raw_writel(0x003fee3f, INT2SMSKCR4);
-}
-
-void __init r8a7779_init_irq(void)
-{
-	void __iomem *gic_dist_base = IOMEM(0xf0001000);
-	void __iomem *gic_cpu_base = IOMEM(0xf0000100);
-
-	/* use GIC to handle interrupts */
-	gic_init(0, 29, gic_dist_base, gic_cpu_base);
-
-	r8a7779_init_irq_common();
-}
-
-#ifdef CONFIG_OF
-void __init r8a7779_init_irq_dt(void)
-{
-	irqchip_init();
-	r8a7779_init_irq_common();
-}
-#endif
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
new file mode 100644
index 0000000..c96f501
--- /dev/null
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -0,0 +1,81 @@
+/*
+ * SMP support for SoCs with SCU covered by mach-shmobile
+ *
+ * Copyright (C) 2013  Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/smp_scu.h>
+#include <mach/common.h>
+
+void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
+{
+	/* install boot code shared by all CPUs */
+	shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
+	shmobile_boot_arg = MPIDR_HWID_BITMASK;
+
+	/* enable SCU and cache coherency on booting CPU */
+	scu_enable(shmobile_scu_base);
+	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
+}
+
+int shmobile_smp_scu_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	/* For this particular CPU, register the SCU boot vector */
+	shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
+			  (unsigned long)shmobile_scu_base);
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void shmobile_smp_scu_cpu_die(unsigned int cpu)
+{
+	/* For this particular CPU, deregister the boot vector */
+	shmobile_smp_hook(cpu, 0, 0);
+
+	dsb();
+	flush_cache_all();
+
+	/* disable cache coherency */
+	scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
+
+	/* jump to shared mach-shmobile sleep / reset code */
+	shmobile_smp_sleep();
+}
+
+static int shmobile_smp_scu_psr_core_disabled(int cpu)
+{
+	unsigned long mask = SCU_PM_POWEROFF << (cpu * 8);
+
+	if ((__raw_readl(shmobile_scu_base + 8) & mask) == mask)
+		return 1;
+
+	return 0;
+}
+
+int shmobile_smp_scu_cpu_kill(unsigned int cpu)
+{
+	int k;
+
+	/* This function runs on a CPU other than the offline target;
+	 * wait here for the shutdown code in platform_cpu_die() to
+	 * finish before asking SoC-specific code to power off the CPU core.
+	 */
+	for (k = 0; k < 1000; k++) {
+		if (shmobile_smp_scu_psr_core_disabled(cpu))
+			return 1;
+
+		mdelay(1);
+	}
+
+	return 0;
+}
+#endif
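
To make the mask arithmetic in shmobile_smp_scu_psr_core_disabled() above concrete: the SCU power status register at shmobile_scu_base + 8 keeps one status byte per CPU, and SCU_PM_POWEROFF is 3, so the shift picks out the 2-bit power status of the target core. A worked example for CPU1 (a sketch, not part of the patch):

	/* CPU1's 2-bit power status sits in bits 9:8 of the SCU PSR; the core
	 * is reported off once both of those bits read back as 1. */
	unsigned long mask = SCU_PM_POWEROFF << (1 * 8);	/* == 0x300 */
	int cpu1_off = (__raw_readl(shmobile_scu_base + 8) & mask) == mask;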
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 1f958d7..d4ae616 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -12,6 +12,9 @@
  */
 #include <linux/init.h>
 #include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <mach/common.h>
 
 void __init shmobile_smp_init_cpus(unsigned int ncores)
 {
@@ -26,3 +29,18 @@
 	for (i = 0; i < ncores; i++)
 		set_cpu_possible(i, true);
 }
+
+extern unsigned long shmobile_smp_fn[];
+extern unsigned long shmobile_smp_arg[];
+extern unsigned long shmobile_smp_mpidr[];
+
+void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
+{
+	shmobile_smp_fn[cpu] = 0;
+	flush_cache_all();
+
+	shmobile_smp_mpidr[cpu] = cpu_logical_map(cpu);
+	shmobile_smp_fn[cpu] = fn;
+	shmobile_smp_arg[cpu] = arg;
+	flush_cache_all();
+}
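
Note the ordering in shmobile_smp_hook(): the slot's fn is zeroed and flushed first, so a secondary scanning the (still uncached) tables in shmobile_smp_boot never pairs a freshly written function with a stale MPIDR or argument; only once the payload is complete is fn republished and flushed again. For illustration, the two call patterns as used by platsmp-scu.c earlier in this patch:

	/* Register: point the woken CPU at the shared SCU boot stub, with the
	 * SCU base as its argument (from shmobile_smp_scu_boot_secondary()). */
	shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
			  (unsigned long)shmobile_scu_base);

	/* Deregister on hot-unplug so the core parks in shmobile_smp_sleep()
	 * (from shmobile_smp_scu_cpu_die()). */
	shmobile_smp_hook(cpu, 0, 0);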
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index 1553af8..3ad531c 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -27,7 +27,6 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
-#include <mach/hardware.h>
 #include <mach/common.h>
 #include <mach/emev2.h>
 #include <mach/irqs.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index d533bd2..8949170 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -188,7 +188,7 @@
 					  &cmt##idx##_platform_data,	\
 					  sizeof(struct sh_timer_config))
 
-void __init r8a73a4_add_standard_devices(void)
+void __init r8a73a4_add_dt_devices(void)
 {
 	r8a73a4_register_scif(SCIFA0);
 	r8a73a4_register_scif(SCIFA1);
@@ -196,10 +196,15 @@
 	r8a73a4_register_scif(SCIFB1);
 	r8a73a4_register_scif(SCIFB2);
 	r8a73a4_register_scif(SCIFB3);
+	r8a7790_register_cmt(10);
+}
+
+void __init r8a73a4_add_standard_devices(void)
+{
+	r8a73a4_add_dt_devices();
 	r8a73a4_register_irqc(0);
 	r8a73a4_register_irqc(1);
 	r8a73a4_register_thermal();
-	r8a7790_register_cmt(10);
 }
 
 void __init r8a73a4_init_delay(void)
@@ -210,11 +215,6 @@
 }
 
 #ifdef CONFIG_USE_OF
-void __init r8a73a4_add_standard_devices_dt(void)
-{
-	platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0);
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
 
 static const char *r8a73a4_boards_compat_dt[] __initdata = {
 	"renesas,r8a73a4",
@@ -223,8 +223,6 @@
 
 DT_MACHINE_START(R8A73A4_DT, "Generic R8A73A4 (Flattened Device Tree)")
 	.init_early	= r8a73a4_init_delay,
-	.init_machine	= r8a73a4_add_standard_devices_dt,
-	.init_time	= shmobile_timer_init,
 	.dt_compat	= r8a73a4_boards_compat_dt,
 MACHINE_END
 #endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 84c5bb6..b7d4b2c 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/platform_data/irq-renesas-intc-irqpin.h>
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
@@ -1019,6 +1021,36 @@
 	shmobile_setup_delay(800, 1, 3); /* Cortex-A9 @ 800MHz */
 };
 
+void __init r8a7740_init_irq_of(void)
+{
+	void __iomem *intc_prio_base = ioremap_nocache(0xe6900010, 0x10);
+	void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
+	void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
+
+	irqchip_init();
+
+	/* route signals to GIC */
+	iowrite32(0x0, pfc_inta_ctrl);
+
+	/*
+	 * To mask the shared interrupt to SPI 149 we must make sure to set
+	 * PRIO *and* MASK. Otherwise we run into IRQ floods when registering
+	 * the intc_irqpin devices.
+	 */
+	iowrite32(0x0, intc_prio_base + 0x0);
+	iowrite32(0x0, intc_prio_base + 0x4);
+	iowrite32(0x0, intc_prio_base + 0x8);
+	iowrite32(0x0, intc_prio_base + 0xc);
+	iowrite8(0xff, intc_msk_base + 0x0);
+	iowrite8(0xff, intc_msk_base + 0x4);
+	iowrite8(0xff, intc_msk_base + 0x8);
+	iowrite8(0xff, intc_msk_base + 0xc);
+
+	iounmap(intc_prio_base);
+	iounmap(intc_msk_base);
+	iounmap(pfc_inta_ctrl);
+}
+
 static void __init r8a7740_generic_init(void)
 {
 	r8a7740_clock_init(0);
@@ -1035,7 +1067,6 @@
 	.init_early	= r8a7740_init_delay,
 	.init_irq	= r8a7740_init_irq_of,
 	.init_machine	= r8a7740_generic_init,
-	.init_time	= shmobile_timer_init,
 	.dt_compat	= r8a7740_boards_compat_dt,
 MACHINE_END
 
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 203becf..6a2657e 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -95,20 +95,6 @@
 		&sh_tmu##idx##_platform_data,		\
 		sizeof(sh_tmu##idx##_platform_data))
 
-/* USB PHY */
-static struct resource usb_phy_resources[] __initdata = {
-	DEFINE_RES_MEM(0xffe70800, 0x100),
-	DEFINE_RES_MEM(0xffe76000, 0x100),
-};
-
-void __init r8a7778_add_usb_phy_device(struct rcar_phy_platform_data *pdata)
-{
-	platform_device_register_resndata(&platform_bus, "rcar_usb_phy", -1,
-					  usb_phy_resources,
-					  ARRAY_SIZE(usb_phy_resources),
-					  pdata, sizeof(*pdata));
-}
-
 /* USB */
 static struct usb_phy *phy;
 
@@ -248,30 +234,6 @@
 	r8a7778_register_gpio(4);
 };
 
-/* SDHI */
-static struct resource sdhi_resources[] __initdata = {
-	/* SDHI0 */
-	DEFINE_RES_MEM(0xFFE4C000, 0x100),
-	DEFINE_RES_IRQ(gic_iid(0x77)),
-	/* SDHI1 */
-	DEFINE_RES_MEM(0xFFE4D000, 0x100),
-	DEFINE_RES_IRQ(gic_iid(0x78)),
-	/* SDHI2 */
-	DEFINE_RES_MEM(0xFFE4F000, 0x100),
-	DEFINE_RES_IRQ(gic_iid(0x76)),
-};
-
-void __init r8a7778_sdhi_init(int id,
-			      struct sh_mobile_sdhi_info *info)
-{
-	BUG_ON(id < 0 || id > 2);
-
-	platform_device_register_resndata(
-		&platform_bus, "sh_mobile_sdhi", id,
-		sdhi_resources + (2 * id), 2,
-		info, sizeof(*info));
-}
-
 /* I2C */
 static struct resource i2c_resources[] __initdata = {
 	/* I2C0 */
@@ -288,7 +250,7 @@
 	DEFINE_RES_IRQ(gic_iid(0x6d)),
 };
 
-void __init r8a7778_add_i2c_device(int id)
+static void __init r8a7778_register_i2c(int id)
 {
 	BUG_ON(id < 0 || id > 3);
 
@@ -310,7 +272,7 @@
 	DEFINE_RES_IRQ(gic_iid(0x75)),
 };
 
-void __init r8a7778_add_hspi_device(int id)
+void __init r8a7778_register_hspi(int id)
 {
 	BUG_ON(id < 0 || id > 2);
 
@@ -319,20 +281,6 @@
 		hspi_resources + (2 * id), 2);
 }
 
-/* MMC */
-static struct resource mmc_resources[] __initdata = {
-	DEFINE_RES_MEM(0xffe4e000, 0x100),
-	DEFINE_RES_IRQ(gic_iid(0x5d)),
-};
-
-void __init r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info)
-{
-	platform_device_register_resndata(
-		&platform_bus, "sh_mmcif", -1,
-		mmc_resources, ARRAY_SIZE(mmc_resources),
-		info, sizeof(*info));
-}
-
 /* VIN */
 #define R8A7778_VIN(idx)						\
 static struct resource vin##idx##_resources[] __initdata = {		\
@@ -367,7 +315,7 @@
 	platform_device_register_full(vin_info_table[id]);
 }
 
-void __init r8a7778_add_standard_devices(void)
+void __init r8a7778_add_dt_devices(void)
 {
 	int i;
 
@@ -391,6 +339,18 @@
 	r8a7778_register_tmu(1);
 }
 
+void __init r8a7778_add_standard_devices(void)
+{
+	r8a7778_add_dt_devices();
+	r8a7778_register_i2c(0);
+	r8a7778_register_i2c(1);
+	r8a7778_register_i2c(2);
+	r8a7778_register_i2c(3);
+	r8a7778_register_hspi(0);
+	r8a7778_register_hspi(1);
+	r8a7778_register_hspi(2);
+}
+
 void __init r8a7778_init_late(void)
 {
 	phy = usb_get_phy(USB_PHY_TYPE_USB2);
@@ -480,7 +440,6 @@
 DT_MACHINE_START(R8A7778_DT, "Generic R8A7778 (Flattened Device Tree)")
 	.init_early	= r8a7778_init_delay,
 	.init_irq	= r8a7778_init_irq_dt,
-	.init_time	= shmobile_timer_init,
 	.dt_compat	= r8a7778_compat_dt,
 	.init_late      = r8a7778_init_late,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 41bab62..b5b2f78 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -22,14 +22,16 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/of_platform.h>
 #include <linux/platform_data/gpio-rcar.h>
+#include <linux/platform_data/irq-renesas-intc-irqpin.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/serial_sci.h>
-#include <linux/sh_intc.h>
 #include <linux/sh_timer.h>
 #include <linux/dma-mapping.h>
 #include <linux/usb/otg.h>
@@ -37,7 +39,6 @@
 #include <linux/usb/ehci_pdriver.h>
 #include <linux/usb/ohci_pdriver.h>
 #include <linux/pm_runtime.h>
-#include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/r8a7779.h>
 #include <mach/common.h>
@@ -69,6 +70,60 @@
 	iotable_init(r8a7779_io_desc, ARRAY_SIZE(r8a7779_io_desc));
 }
 
+/* IRQ */
+#define INT2SMSKCR0 IOMEM(0xfe7822a0)
+#define INT2SMSKCR1 IOMEM(0xfe7822a4)
+#define INT2SMSKCR2 IOMEM(0xfe7822a8)
+#define INT2SMSKCR3 IOMEM(0xfe7822ac)
+#define INT2SMSKCR4 IOMEM(0xfe7822b0)
+
+#define INT2NTSR0 IOMEM(0xfe700060)
+#define INT2NTSR1 IOMEM(0xfe700064)
+
+static struct renesas_intc_irqpin_config irqpin0_platform_data __initdata = {
+	.irq_base = irq_pin(0), /* IRQ0 -> IRQ3 */
+	.sense_bitfield_width = 2,
+};
+
+static struct resource irqpin0_resources[] __initdata = {
+	DEFINE_RES_MEM(0xfe78001c, 4), /* ICR1 */
+	DEFINE_RES_MEM(0xfe780010, 4), /* INTPRI */
+	DEFINE_RES_MEM(0xfe780024, 4), /* INTREQ */
+	DEFINE_RES_MEM(0xfe780044, 4), /* INTMSK0 */
+	DEFINE_RES_MEM(0xfe780064, 4), /* INTMSKCLR0 */
+	DEFINE_RES_IRQ(gic_spi(27)), /* IRQ0 */
+	DEFINE_RES_IRQ(gic_spi(28)), /* IRQ1 */
+	DEFINE_RES_IRQ(gic_spi(29)), /* IRQ2 */
+	DEFINE_RES_IRQ(gic_spi(30)), /* IRQ3 */
+};
+
+void __init r8a7779_init_irq_extpin(int irlm)
+{
+	void __iomem *icr0 = ioremap_nocache(0xfe780000, PAGE_SIZE);
+	u32 tmp;
+
+	if (!icr0) {
+		pr_warn("r8a7779: unable to setup external irq pin mode\n");
+		return;
+	}
+
+	tmp = ioread32(icr0);
+	if (irlm)
+		tmp |= 1 << 23; /* IRQ0 -> IRQ3 as individual pins */
+	else
+		tmp &= ~(1 << 23); /* IRL mode - not supported */
+	tmp |= (1 << 21); /* LVLMODE = 1 */
+	iowrite32(tmp, icr0);
+	iounmap(icr0);
+
+	if (irlm)
+		platform_device_register_resndata(
+			&platform_bus, "renesas_intc_irqpin", -1,
+			irqpin0_resources, ARRAY_SIZE(irqpin0_resources),
+			&irqpin0_platform_data, sizeof(irqpin0_platform_data));
+}
+
+/* PFC/GPIO */
 static struct resource r8a7779_pfc_resources[] = {
 	DEFINE_RES_MEM(0xfffc0000, 0x023c),
 };
@@ -388,15 +443,6 @@
 	},
 };
 
-/* USB PHY */
-static struct resource usb_phy_resources[] __initdata = {
-	[0] = {
-		.start		= 0xffe70800,
-		.end		= 0xffe70900 - 1,
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
 /* USB */
 static struct usb_phy *phy;
 
@@ -548,7 +594,7 @@
 };
 
 /* Ether */
-static struct resource ether_resources[] = {
+static struct resource ether_resources[] __initdata = {
 	{
 		.start	= 0xfde00000,
 		.end	= 0xfde003ff,
@@ -629,14 +675,6 @@
 					  pdata, sizeof(*pdata));
 }
 
-void __init r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata)
-{
-	platform_device_register_resndata(&platform_bus, "rcar_usb_phy", -1,
-					  usb_phy_resources,
-					  ARRAY_SIZE(usb_phy_resources),
-					  pdata, sizeof(*pdata));
-}
-
 void __init r8a7779_add_vin_device(int id, struct rcar_vin_platform_data *pdata)
 {
 	BUG_ON(id < 0 || id > 3);
@@ -697,6 +735,29 @@
 }
 
 #ifdef CONFIG_USE_OF
+static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
+{
+	return 0; /* always allow wakeup */
+}
+
+void __init r8a7779_init_irq_dt(void)
+{
+	gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+
+	irqchip_init();
+
+	/* route all interrupts to ARM */
+	__raw_writel(0xffffffff, INT2NTSR0);
+	__raw_writel(0x3fffffff, INT2NTSR1);
+
+	/* unmask all known interrupts in INTCS2 */
+	__raw_writel(0xfffffff0, INT2SMSKCR0);
+	__raw_writel(0xfff7ffff, INT2SMSKCR1);
+	__raw_writel(0xfffbffdf, INT2SMSKCR2);
+	__raw_writel(0xbffffffc, INT2SMSKCR3);
+	__raw_writel(0x003fee3f, INT2SMSKCR4);
+}
+
 void __init r8a7779_init_delay(void)
 {
 	shmobile_setup_delay(1000, 2, 4); /* Cortex-A9 @ 1000MHz */
@@ -723,7 +784,6 @@
 	.nr_irqs	= NR_IRQS_LEGACY,
 	.init_irq	= r8a7779_init_irq_dt,
 	.init_machine	= r8a7779_add_standard_devices_dt,
-	.init_time	= shmobile_timer_init,
 	.init_late	= r8a7779_init_late,
 	.dt_compat	= r8a7779_compat_dt,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
index 4c96dad..d0f5c9f 100644
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ b/arch/arm/mach-shmobile/setup-r8a7790.c
@@ -18,6 +18,7 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#include <linux/clocksource.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/of_platform.h>
@@ -160,13 +161,13 @@
 					thermal_resources,		\
 					ARRAY_SIZE(thermal_resources))
 
-static struct sh_timer_config cmt00_platform_data = {
+static struct sh_timer_config cmt00_platform_data __initdata = {
 	.name = "CMT00",
 	.timer_bit = 0,
 	.clockevent_rating = 80,
 };
 
-static struct resource cmt00_resources[] = {
+static struct resource cmt00_resources[] __initdata = {
 	DEFINE_RES_MEM(0xffca0510, 0x0c),
 	DEFINE_RES_MEM(0xffca0500, 0x04),
 	DEFINE_RES_IRQ(gic_spi(142)), /* CMT0_0 */
@@ -179,7 +180,7 @@
 					  &cmt##idx##_platform_data,	\
 					  sizeof(struct sh_timer_config))
 
-void __init r8a7790_add_standard_devices(void)
+void __init r8a7790_add_dt_devices(void)
 {
 	r8a7790_register_scif(SCIFA0);
 	r8a7790_register_scif(SCIFA1);
@@ -191,9 +192,14 @@
 	r8a7790_register_scif(SCIF1);
 	r8a7790_register_scif(HSCIF0);
 	r8a7790_register_scif(HSCIF1);
+	r8a7790_register_cmt(00);
+}
+
+void __init r8a7790_add_standard_devices(void)
+{
+	r8a7790_add_dt_devices();
 	r8a7790_register_irqc(0);
 	r8a7790_register_thermal();
-	r8a7790_register_cmt(00);
 }
 
 #define MODEMR 0xe6160060
@@ -258,7 +264,7 @@
 	iounmap(base);
 #endif /* CONFIG_ARM_ARCH_TIMER */
 
-	shmobile_timer_init();
+	clocksource_of_init();
 }
 
 void __init r8a7790_init_delay(void)
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 13e6fdb..3118783 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -35,7 +35,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/platform_data/sh_ipmmu.h>
 #include <mach/dma-register.h>
-#include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/sh7372.h>
 #include <mach/common.h>
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 516c239..22de174 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -34,7 +34,6 @@
 #include <linux/platform_data/sh_ipmmu.h>
 #include <linux/platform_data/irq-renesas-intc-irqpin.h>
 #include <mach/dma-register.h>
-#include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/sh73a0.h>
 #include <mach/common.h>
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 78e84c5..522de5e 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -34,6 +34,12 @@
 
 static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
+	int ret;
+
+	ret = shmobile_smp_scu_boot_secondary(cpu, idle);
+	if (ret)
+		return ret;
+
 	arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
 	return 0;
 }
@@ -42,21 +48,16 @@
 {
 	void __iomem *smu;
 
-	/* setup EMEV2 specific SCU base, enable */
-	shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
-	scu_enable(shmobile_scu_base);
-
-	/* Tell ROM loader about our vector (in headsmp-scu.S, headsmp.S) */
+	/* Tell ROM loader about our vector (in headsmp.S) */
 	smu = ioremap(EMEV2_SMU_BASE, PAGE_SIZE);
 	if (smu) {
 		iowrite32(__pa(shmobile_boot_vector), smu + SMU_GENERAL_REG0);
 		iounmap(smu);
 	}
-	shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-	shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
-	/* enable cache coherency on booting CPU */
-	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
+	/* set up EMEV2-specific SCU bits */
+	shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
+	shmobile_smp_scu_prepare_cpus(max_cpus);
 }
 
 struct smp_operations emev2_smp_ops __initdata = {
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 9bdf810..0f05e9f 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -84,30 +84,34 @@
 static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	struct r8a7779_pm_ch *ch = NULL;
-	int ret = -EIO;
+	unsigned int lcpu = cpu_logical_map(cpu);
+	int ret;
 
-	cpu = cpu_logical_map(cpu);
+	ret = shmobile_smp_scu_boot_secondary(cpu, idle);
+	if (ret)
+		return ret;
 
-	if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
-		ch = r8a7779_ch_cpu[cpu];
+	if (lcpu < ARRAY_SIZE(r8a7779_ch_cpu))
+		ch = r8a7779_ch_cpu[lcpu];
 
 	if (ch)
 		ret = r8a7779_sysc_power_up(ch);
+	else
+		ret = -EIO;
 
 	return ret;
 }
 
 static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
 {
-	scu_enable(shmobile_scu_base);
-
 	/* Map the reset vector (in headsmp-scu.S, headsmp.S) */
 	__raw_writel(__pa(shmobile_boot_vector), AVECR);
 	shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
 	shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
-	/* enable cache coherency on booting CPU */
-	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
+	/* set up r8a7779-specific SCU bits */
+	shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
+	shmobile_smp_scu_prepare_cpus(max_cpus);
 
 	r8a7779_pm_init();
 
@@ -117,56 +121,15 @@
 	r8a7779_platform_cpu_kill(3);
 }
 
-static void __init r8a7779_smp_init_cpus(void)
-{
-	/* setup r8a7779 specific SCU base */
-	shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
-
-	shmobile_smp_init_cpus(scu_get_core_count(shmobile_scu_base));
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
-static int r8a7779_scu_psr_core_disabled(int cpu)
-{
-	unsigned long mask = 3 << (cpu * 8);
-
-	if ((__raw_readl(shmobile_scu_base + 8) & mask) == mask)
-		return 1;
-
-	return 0;
-}
-
 static int r8a7779_cpu_kill(unsigned int cpu)
 {
-	int k;
-
-	/* this function is running on another CPU than the offline target,
-	 * here we need wait for shutdown code in platform_cpu_die() to
-	 * finish before asking SoC-specific code to power off the CPU core.
-	 */
-	for (k = 0; k < 1000; k++) {
-		if (r8a7779_scu_psr_core_disabled(cpu))
-			return r8a7779_platform_cpu_kill(cpu);
-
-		mdelay(1);
-	}
+	if (shmobile_smp_scu_cpu_kill(cpu))
+		return r8a7779_platform_cpu_kill(cpu);
 
 	return 0;
 }
 
-static void r8a7779_cpu_die(unsigned int cpu)
-{
-	dsb();
-	flush_cache_all();
-
-	/* disable cache coherency */
-	scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
-
-	/* Endless loop until power off from r8a7779_cpu_kill() */
-	while (1)
-		cpu_do_idle();
-}
-
 static int r8a7779_cpu_disable(unsigned int cpu)
 {
 	/* only CPU1->3 have power domains, do not allow hotplug of CPU0 */
@@ -175,12 +138,11 @@
 #endif /* CONFIG_HOTPLUG_CPU */
 
 struct smp_operations r8a7779_smp_ops  __initdata = {
-	.smp_init_cpus		= r8a7779_smp_init_cpus,
 	.smp_prepare_cpus	= r8a7779_smp_prepare_cpus,
 	.smp_boot_secondary	= r8a7779_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_kill		= r8a7779_cpu_kill,
-	.cpu_die		= r8a7779_cpu_die,
 	.cpu_disable		= r8a7779_cpu_disable,
+	.cpu_die		= shmobile_smp_scu_cpu_die,
+	.cpu_kill		= r8a7779_cpu_kill,
 #endif
 };
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d5fc3ed..0baa244 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -20,14 +20,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/smp.h>
-#include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <mach/common.h>
-#include <asm/cacheflush.h>
-#include <asm/smp_plat.h>
 #include <mach/sh73a0.h>
-#include <asm/smp_scu.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 
 #define WUPCR		IOMEM(0xe6151010)
@@ -36,8 +33,6 @@
 #define SBAR		IOMEM(0xe6180020)
 #define APARMBAREA	IOMEM(0xe6f10020)
 
-#define PSTR_SHUTDOWN_MODE	3
-
 #define SH73A0_SCU_BASE 0xf0000000
 
 #ifdef CONFIG_HAVE_ARM_TWD
@@ -50,69 +45,33 @@
 
 static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	cpu = cpu_logical_map(cpu);
+	unsigned int lcpu = cpu_logical_map(cpu);
+	int ret;
 
-	if (((__raw_readl(PSTR) >> (4 * cpu)) & 3) == 3)
-		__raw_writel(1 << cpu, WUPCR);	/* wake up */
+	ret = shmobile_smp_scu_boot_secondary(cpu, idle);
+	if (ret)
+		return ret;
+
+	if (((__raw_readl(PSTR) >> (4 * lcpu)) & 3) == 3)
+		__raw_writel(1 << lcpu, WUPCR);	/* wake up */
 	else
-		__raw_writel(1 << cpu, SRESCR);	/* reset */
+		__raw_writel(1 << lcpu, SRESCR);	/* reset */
 
 	return 0;
 }
 
 static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
 {
-	scu_enable(shmobile_scu_base);
-
-	/* Map the reset vector (in headsmp-scu.S, headsmp.S) */
+	/* Map the reset vector (in headsmp.S) */
 	__raw_writel(0, APARMBAREA);      /* 4k */
 	__raw_writel(__pa(shmobile_boot_vector), SBAR);
-	shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-	shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
-	/* enable cache coherency on booting CPU */
-	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
-}
-
-static void __init sh73a0_smp_init_cpus(void)
-{
-	/* setup sh73a0 specific SCU base */
+	/* set up sh73a0-specific SCU bits */
 	shmobile_scu_base = IOMEM(SH73A0_SCU_BASE);
-
-	shmobile_smp_init_cpus(scu_get_core_count(shmobile_scu_base));
+	shmobile_smp_scu_prepare_cpus(max_cpus);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int sh73a0_cpu_kill(unsigned int cpu)
-{
-
-	int k;
-	u32 pstr;
-
-	/*
-	 * wait until the power status register confirms the shutdown of the
-	 * offline target
-	 */
-	for (k = 0; k < 1000; k++) {
-		pstr = (__raw_readl(PSTR) >> (4 * cpu)) & 3;
-		if (pstr == PSTR_SHUTDOWN_MODE)
-			return 1;
-
-		mdelay(1);
-	}
-
-	return 0;
-}
-
-static void sh73a0_cpu_die(unsigned int cpu)
-{
-	/* Set power off mode. This takes the CPU out of the MP cluster */
-	scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
-
-	/* Enter shutdown mode */
-	cpu_do_idle();
-}
-
 static int sh73a0_cpu_disable(unsigned int cpu)
 {
 	return 0; /* CPU0 and CPU1 supported */
@@ -120,12 +79,11 @@
 #endif /* CONFIG_HOTPLUG_CPU */
 
 struct smp_operations sh73a0_smp_ops __initdata = {
-	.smp_init_cpus		= sh73a0_smp_init_cpus,
 	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
 	.smp_boot_secondary	= sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_kill		= sh73a0_cpu_kill,
-	.cpu_die		= sh73a0_cpu_die,
 	.cpu_disable		= sh73a0_cpu_disable,
+	.cpu_die		= shmobile_smp_scu_cpu_die,
+	.cpu_kill		= shmobile_smp_scu_cpu_kill,
 #endif
 };
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index f321dbe..62d7052 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -59,7 +59,3 @@
 	late_time_init = shmobile_late_time_init;
 }
 
-void __init shmobile_timer_init(void)
-{
-	clocksource_of_init();
-}
diff --git a/arch/arm/mach-ux500/board-mop500-audio.c b/arch/arm/mach-ux500/board-mop500-audio.c
index bfe443d..ec08072 100644
--- a/arch/arm/mach-ux500/board-mop500-audio.c
+++ b/arch/arm/mach-ux500/board-mop500-audio.c
@@ -17,7 +17,6 @@
 #include "ste-dma40-db8500.h"
 #include "board-mop500.h"
 #include "devices-db8500.h"
-#include "pins-db8500.h"
 
 static struct stedma40_chan_cfg msp0_dma_rx = {
 	.high_priority = true,
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
index 7936d40..0efb156 100644
--- a/arch/arm/mach-ux500/board-mop500-pins.c
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -14,7 +14,6 @@
 
 #include <asm/mach-types.h>
 
-#include "pins-db8500.h"
 #include "board-mop500.h"
 
 enum custom_pin_cfg_t {
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 4e7ab3a..ad0806e 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -324,21 +324,19 @@
        .clock_mode     = LP55XX_CLOCK_EXT,
 };
 
+/* I2C0 devices only available on the first HREF/MOP500 */
 static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
 	{
 		I2C_BOARD_INFO("tc3589x", 0x42),
 		.irq		= NOMADIK_GPIO_TO_IRQ(217),
 		.platform_data  = &mop500_tc35892_data,
 	},
-	/* I2C0 devices only available prior to HREFv60 */
 	{
 		I2C_BOARD_INFO("tps61052", 0x33),
 		.platform_data  = &mop500_tps61052_data,
 	},
 };
 
-#define NUM_PRE_V60_I2C0_DEVICES 1
-
 static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
 	{
 		/* lp5521 LED driver, 1st device */
@@ -356,6 +354,17 @@
 	},
 };
 
+static int __init mop500_i2c_board_init(void)
+{
+	if (machine_is_u8500())
+		mop500_uib_i2c_add(0, mop500_i2c0_devices,
+				   ARRAY_SIZE(mop500_i2c0_devices));
+	mop500_uib_i2c_add(2, mop500_i2c2_devices,
+			   ARRAY_SIZE(mop500_i2c2_devices));
+	return 0;
+}
+device_initcall(mop500_i2c_board_init);
+
 static void __init mop500_i2c_init(struct device *parent)
 {
 	db8500_add_i2c0(parent, NULL);
@@ -564,7 +573,6 @@
 static void __init mop500_init_machine(void)
 {
 	struct device *parent = NULL;
-	int i2c0_devs;
 	int i;
 
 	platform_device_register(&db8500_prcmu_device);
@@ -587,19 +595,13 @@
 	mop500_spi_init(parent);
 	mop500_audio_init(parent);
 	mop500_uart_init(parent);
-
 	u8500_cryp1_hash1_init(parent);
 
-	i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
-
-	i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
-	i2c_register_board_info(2, mop500_i2c2_devices,
-				ARRAY_SIZE(mop500_i2c2_devices));
-
 	/* This board has full regulator constraints */
 	regulator_has_full_constraints();
 }
 
 static void __init snowball_init_machine(void)
 {
 	struct device *parent = NULL;
@@ -634,7 +636,6 @@
 static void __init hrefv60_init_machine(void)
 {
 	struct device *parent = NULL;
-	int i2c0_devs;
 	int i;
 
 	platform_device_register(&db8500_prcmu_device);
@@ -663,14 +664,6 @@
 	mop500_audio_init(parent);
 	mop500_uart_init(parent);
 
-	i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
-
-	i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
-
-	i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
-	i2c_register_board_info(2, mop500_i2c2_devices,
-				ARRAY_SIZE(mop500_i2c2_devices));
-
 	/* This board has full regulator constraints */
 	regulator_has_full_constraints();
 }
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index bfaf95d..301c346 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -156,7 +156,8 @@
 		.supports_sleepmode = true,
 	};
 
-	dbx500_add_gpios(parent, ARRAY_AND_SIZE(db8500_gpio_base),
+	dbx500_add_gpios(parent, db8500_gpio_base,
+			 ARRAY_SIZE(db8500_gpio_base),
 			 IRQ_DB8500_GPIO0, &pdata);
 	dbx500_add_pinctrl(parent, "pinctrl-db8500", U8500_PRCMU_BASE);
 }
diff --git a/arch/arm/mach-ux500/pins-db8500.h b/arch/arm/mach-ux500/pins-db8500.h
deleted file mode 100644
index 062c7ac..0000000
--- a/arch/arm/mach-ux500/pins-db8500.h
+++ /dev/null
@@ -1,746 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License terms: GNU General Public License, version 2
- * Author: Rabin Vincent <rabin.vincent@stericsson.com>
- */
-
-#ifndef __MACH_PINS_DB8500_H
-#define __MACH_PINS_DB8500_H
-
-/*
- * TODO: Eventually encode all non-board specific pull up/down configuration
- * here.
- */
-
-#define GPIO0_GPIO		PIN_CFG(0, GPIO)
-#define GPIO0_U0_CTSn		PIN_CFG(0, ALT_A)
-#define GPIO0_TRIG_OUT		PIN_CFG(0, ALT_B)
-#define GPIO0_IP_TDO		PIN_CFG(0, ALT_C)
-
-#define GPIO1_GPIO		PIN_CFG(1, GPIO)
-#define GPIO1_U0_RTSn		PIN_CFG(1, ALT_A)
-#define GPIO1_TRIG_IN		PIN_CFG(1, ALT_B)
-#define GPIO1_IP_TDI		PIN_CFG(1, ALT_C)
-
-#define GPIO2_GPIO		PIN_CFG(2, GPIO)
-#define GPIO2_U0_RXD		PIN_CFG(2, ALT_A)
-#define GPIO2_NONE		PIN_CFG(2, ALT_B)
-#define GPIO2_IP_TMS		PIN_CFG(2, ALT_C)
-
-#define GPIO3_GPIO		PIN_CFG(3, GPIO)
-#define GPIO3_U0_TXD		PIN_CFG(3, ALT_A)
-#define GPIO3_NONE		PIN_CFG(3, ALT_B)
-#define GPIO3_IP_TCK		PIN_CFG(3, ALT_C)
-
-#define GPIO4_GPIO		PIN_CFG(4, GPIO)
-#define GPIO4_U1_RXD		PIN_CFG(4, ALT_A)
-#define GPIO4_I2C4_SCL		PIN_CFG(4, ALT_B)
-#define GPIO4_IP_TRSTn		PIN_CFG(4, ALT_C)
-
-#define GPIO5_GPIO		PIN_CFG(5, GPIO)
-#define GPIO5_U1_TXD		PIN_CFG(5, ALT_A)
-#define GPIO5_I2C4_SDA		PIN_CFG(5, ALT_B)
-#define GPIO5_IP_GPIO6		PIN_CFG(5, ALT_C)
-
-#define GPIO6_GPIO		PIN_CFG(6, GPIO)
-#define GPIO6_U1_CTSn		PIN_CFG(6, ALT_A)
-#define GPIO6_I2C1_SCL		PIN_CFG(6, ALT_B)
-#define GPIO6_IP_GPIO0		PIN_CFG(6, ALT_C)
-
-#define GPIO7_GPIO		PIN_CFG(7, GPIO)
-#define GPIO7_U1_RTSn		PIN_CFG(7, ALT_A)
-#define GPIO7_I2C1_SDA		PIN_CFG(7, ALT_B)
-#define GPIO7_IP_GPIO1		PIN_CFG(7, ALT_C)
-
-#define GPIO8_GPIO		PIN_CFG(8, GPIO)
-#define GPIO8_IPI2C_SDA		PIN_CFG(8, ALT_A)
-#define GPIO8_I2C2_SDA		PIN_CFG(8, ALT_B)
-
-#define GPIO9_GPIO		PIN_CFG(9, GPIO)
-#define GPIO9_IPI2C_SCL		PIN_CFG(9, ALT_A)
-#define GPIO9_I2C2_SCL		PIN_CFG(9, ALT_B)
-
-#define GPIO10_GPIO		PIN_CFG(10, GPIO)
-#define GPIO10_IPI2C_SDA	PIN_CFG(10, ALT_A)
-#define GPIO10_I2C2_SDA		PIN_CFG(10, ALT_B)
-#define GPIO10_IP_GPIO3		PIN_CFG(10, ALT_C)
-
-#define GPIO11_GPIO		PIN_CFG(11, GPIO)
-#define GPIO11_IPI2C_SCL	PIN_CFG(11, ALT_A)
-#define GPIO11_I2C2_SCL		PIN_CFG(11, ALT_B)
-#define GPIO11_IP_GPIO2		PIN_CFG(11, ALT_C)
-
-#define GPIO12_GPIO		PIN_CFG(12, GPIO)
-#define GPIO12_MSP0_TXD		PIN_CFG(12, ALT_A)
-#define GPIO12_MSP0_RXD		PIN_CFG(12, ALT_B)
-
-#define GPIO13_GPIO		PIN_CFG(13, GPIO)
-#define GPIO13_MSP0_TFS		PIN_CFG(13, ALT_A)
-
-#define GPIO14_GPIO		PIN_CFG(14, GPIO)
-#define GPIO14_MSP0_TCK		PIN_CFG(14, ALT_A)
-
-#define GPIO15_GPIO		PIN_CFG(15, GPIO)
-#define GPIO15_MSP0_RXD		PIN_CFG(15, ALT_A)
-#define GPIO15_MSP0_TXD		PIN_CFG(15, ALT_B)
-
-#define GPIO16_GPIO		PIN_CFG(16, GPIO)
-#define GPIO16_MSP0_RFS		PIN_CFG(16, ALT_A)
-#define GPIO16_I2C1_SCL		PIN_CFG(16, ALT_B)
-#define GPIO16_SLIM0_DAT	PIN_CFG(16, ALT_C)
-
-#define GPIO17_GPIO		PIN_CFG(17, GPIO)
-#define GPIO17_MSP0_RCK		PIN_CFG(17, ALT_A)
-#define GPIO17_I2C1_SDA		PIN_CFG(17, ALT_B)
-#define GPIO17_SLIM0_CLK	PIN_CFG(17, ALT_C)
-
-#define GPIO18_GPIO		PIN_CFG(18, GPIO)
-#define GPIO18_MC0_CMDDIR	PIN_CFG_INPUT(18, ALT_A, PULLUP)
-#define GPIO18_U2_RXD		PIN_CFG(18, ALT_B)
-#define GPIO18_MS_IEP		PIN_CFG(18, ALT_C)
-
-#define GPIO19_GPIO		PIN_CFG(19, GPIO)
-#define GPIO19_MC0_DAT0DIR	PIN_CFG_INPUT(19, ALT_A, PULLUP)
-#define GPIO19_U2_TXD		PIN_CFG(19, ALT_B)
-#define GPIO19_MS_DAT0DIR	PIN_CFG(19, ALT_C)
-
-#define GPIO20_GPIO		PIN_CFG(20, GPIO)
-#define GPIO20_MC0_DAT2DIR	PIN_CFG_INPUT(20, ALT_A, PULLUP)
-#define GPIO20_UARTMOD_TXD	PIN_CFG(20, ALT_B)
-#define GPIO20_IP_TRIGOUT	PIN_CFG(20, ALT_C)
-
-#define GPIO21_GPIO		PIN_CFG(21, GPIO)
-#define GPIO21_MC0_DAT31DIR	PIN_CFG_INPUT(21, ALT_A, PULLUP)
-#define GPIO21_MSP0_SCK		PIN_CFG(21, ALT_B)
-#define GPIO21_MS_DAT31DIR	PIN_CFG(21, ALT_C)
-
-#define GPIO22_GPIO		PIN_CFG(22, GPIO)
-#define GPIO22_MC0_FBCLK	PIN_CFG_INPUT(22, ALT_A, PULLUP)
-#define GPIO22_UARTMOD_RXD	PIN_CFG(22, ALT_B)
-#define GPIO22_MS_FBCLK		PIN_CFG(22, ALT_C)
-
-#define GPIO23_GPIO		PIN_CFG(23, GPIO)
-#define GPIO23_MC0_CLK		PIN_CFG_INPUT(23, ALT_A, PULLUP)
-#define GPIO23_STMMOD_CLK	PIN_CFG(23, ALT_B)
-#define GPIO23_MS_CLK		PIN_CFG(23, ALT_C)
-
-#define GPIO24_GPIO		PIN_CFG(24, GPIO)
-#define GPIO24_MC0_CMD		PIN_CFG_INPUT(24, ALT_A, PULLUP)
-#define GPIO24_UARTMOD_RXD	PIN_CFG(24, ALT_B)
-#define GPIO24_MS_BS		PIN_CFG(24, ALT_C)
-
-#define GPIO25_GPIO		PIN_CFG(25, GPIO)
-#define GPIO25_MC0_DAT0		PIN_CFG_INPUT(25, ALT_A, PULLUP)
-#define GPIO25_STMMOD_DAT0	PIN_CFG(25, ALT_B)
-#define GPIO25_MS_DAT0		PIN_CFG(25, ALT_C)
-
-#define GPIO26_GPIO		PIN_CFG(26, GPIO)
-#define GPIO26_MC0_DAT1		PIN_CFG_INPUT(26, ALT_A, PULLUP)
-#define GPIO26_STMMOD_DAT1	PIN_CFG(26, ALT_B)
-#define GPIO26_MS_DAT1		PIN_CFG(26, ALT_C)
-
-#define GPIO27_GPIO		PIN_CFG(27, GPIO)
-#define GPIO27_MC0_DAT2		PIN_CFG_INPUT(27, ALT_A, PULLUP)
-#define GPIO27_STMMOD_DAT2	PIN_CFG(27, ALT_B)
-#define GPIO27_MS_DAT2		PIN_CFG(27, ALT_C)
-
-#define GPIO28_GPIO		PIN_CFG(28, GPIO)
-#define GPIO28_MC0_DAT3		PIN_CFG_INPUT(28, ALT_A, PULLUP)
-#define GPIO28_STMMOD_DAT3	PIN_CFG(28, ALT_B)
-#define GPIO28_MS_DAT3		PIN_CFG(28, ALT_C)
-
-#define GPIO29_GPIO		PIN_CFG(29, GPIO)
-#define GPIO29_MC0_DAT4		PIN_CFG(29, ALT_A)
-#define GPIO29_SPI3_CLK		PIN_CFG(29, ALT_B)
-#define GPIO29_U2_RXD		PIN_CFG(29, ALT_C)
-
-#define GPIO30_GPIO		PIN_CFG(30, GPIO)
-#define GPIO30_MC0_DAT5		PIN_CFG(30, ALT_A)
-#define GPIO30_SPI3_RXD		PIN_CFG(30, ALT_B)
-#define GPIO30_U2_TXD		PIN_CFG(30, ALT_C)
-
-#define GPIO31_GPIO		PIN_CFG(31, GPIO)
-#define GPIO31_MC0_DAT6		PIN_CFG(31, ALT_A)
-#define GPIO31_SPI3_FRM		PIN_CFG(31, ALT_B)
-#define GPIO31_U2_CTSn		PIN_CFG(31, ALT_C)
-
-#define GPIO32_GPIO		PIN_CFG(32, GPIO)
-#define GPIO32_MC0_DAT7		PIN_CFG(32, ALT_A)
-#define GPIO32_SPI3_TXD		PIN_CFG(32, ALT_B)
-#define GPIO32_U2_RTSn		PIN_CFG(32, ALT_C)
-
-#define GPIO33_GPIO		PIN_CFG(33, GPIO)
-#define GPIO33_MSP1_TXD		PIN_CFG(33, ALT_A)
-#define GPIO33_MSP1_RXD		PIN_CFG(33, ALT_B)
-#define GPIO33_U0_DTRn		PIN_CFG(33, ALT_C)
-
-#define GPIO34_GPIO		PIN_CFG(34, GPIO)
-#define GPIO34_MSP1_TFS		PIN_CFG(34, ALT_A)
-#define GPIO34_NONE		PIN_CFG(34, ALT_B)
-#define GPIO34_U0_DCDn		PIN_CFG(34, ALT_C)
-
-#define GPIO35_GPIO		PIN_CFG(35, GPIO)
-#define GPIO35_MSP1_TCK		PIN_CFG(35, ALT_A)
-#define GPIO35_NONE		PIN_CFG(35, ALT_B)
-#define GPIO35_U0_DSRn		PIN_CFG(35, ALT_C)
-
-#define GPIO36_GPIO		PIN_CFG(36, GPIO)
-#define GPIO36_MSP1_RXD		PIN_CFG(36, ALT_A)
-#define GPIO36_MSP1_TXD		PIN_CFG(36, ALT_B)
-#define GPIO36_U0_RIn		PIN_CFG(36, ALT_C)
-
-#define GPIO64_GPIO		PIN_CFG(64, GPIO)
-#define GPIO64_LCDB_DE		PIN_CFG(64, ALT_A)
-#define GPIO64_KP_O1		PIN_CFG(64, ALT_B)
-#define GPIO64_IP_GPIO4		PIN_CFG(64, ALT_C)
-
-#define GPIO65_GPIO		PIN_CFG(65, GPIO)
-#define GPIO65_LCDB_HSO		PIN_CFG(65, ALT_A)
-#define GPIO65_KP_O0		PIN_CFG(65, ALT_B)
-#define GPIO65_IP_GPIO5		PIN_CFG(65, ALT_C)
-
-#define GPIO66_GPIO		PIN_CFG(66, GPIO)
-#define GPIO66_LCDB_VSO		PIN_CFG(66, ALT_A)
-#define GPIO66_KP_I1		PIN_CFG(66, ALT_B)
-#define GPIO66_IP_GPIO6		PIN_CFG(66, ALT_C)
-
-#define GPIO67_GPIO		PIN_CFG(67, GPIO)
-#define GPIO67_LCDB_CLK		PIN_CFG(67, ALT_A)
-#define GPIO67_KP_I0		PIN_CFG(67, ALT_B)
-#define GPIO67_IP_GPIO7		PIN_CFG(67, ALT_C)
-
-#define GPIO68_GPIO		PIN_CFG(68, GPIO)
-#define GPIO68_LCD_VSI0		PIN_CFG(68, ALT_A)
-#define GPIO68_KP_O7		PIN_CFG(68, ALT_B)
-#define GPIO68_SM_CLE		PIN_CFG(68, ALT_C)
-
-#define GPIO69_GPIO		PIN_CFG(69, GPIO)
-#define GPIO69_LCD_VSI1		PIN_CFG(69, ALT_A)
-#define GPIO69_KP_I7		PIN_CFG(69, ALT_B)
-#define GPIO69_SM_ALE		PIN_CFG(69, ALT_C)
-
-#define GPIO70_GPIO		PIN_CFG(70, GPIO)
-#define GPIO70_LCD_D0		PIN_CFG(70, ALT_A)
-#define GPIO70_KP_O5		PIN_CFG(70, ALT_B)
-#define GPIO70_STMAPE_CLK	PIN_CFG(70, ALT_C)
-
-#define GPIO71_GPIO		PIN_CFG(71, GPIO)
-#define GPIO71_LCD_D1		PIN_CFG(71, ALT_A)
-#define GPIO71_KP_O4		PIN_CFG(71, ALT_B)
-#define GPIO71_STMAPE_DAT3	PIN_CFG(71, ALT_C)
-
-#define GPIO72_GPIO		PIN_CFG(72, GPIO)
-#define GPIO72_LCD_D2		PIN_CFG(72, ALT_A)
-#define GPIO72_KP_O3		PIN_CFG(72, ALT_B)
-#define GPIO72_STMAPE_DAT2	PIN_CFG(72, ALT_C)
-
-#define GPIO73_GPIO		PIN_CFG(73, GPIO)
-#define GPIO73_LCD_D3		PIN_CFG(73, ALT_A)
-#define GPIO73_KP_O2		PIN_CFG(73, ALT_B)
-#define GPIO73_STMAPE_DAT1	PIN_CFG(73, ALT_C)
-
-#define GPIO74_GPIO		PIN_CFG(74, GPIO)
-#define GPIO74_LCD_D4		PIN_CFG(74, ALT_A)
-#define GPIO74_KP_I5		PIN_CFG(74, ALT_B)
-#define GPIO74_STMAPE_DAT0	PIN_CFG(74, ALT_C)
-
-#define GPIO75_GPIO		PIN_CFG(75, GPIO)
-#define GPIO75_LCD_D5		PIN_CFG(75, ALT_A)
-#define GPIO75_KP_I4		PIN_CFG(75, ALT_B)
-#define GPIO75_U2_RXD		PIN_CFG(75, ALT_C)
-
-#define GPIO76_GPIO		PIN_CFG(76, GPIO)
-#define GPIO76_LCD_D6		PIN_CFG(76, ALT_A)
-#define GPIO76_KP_I3		PIN_CFG(76, ALT_B)
-#define GPIO76_U2_TXD		PIN_CFG(76, ALT_C)
-
-#define GPIO77_GPIO		PIN_CFG(77, GPIO)
-#define GPIO77_LCD_D7		PIN_CFG(77, ALT_A)
-#define GPIO77_KP_I2		PIN_CFG(77, ALT_B)
-#define GPIO77_NONE		PIN_CFG(77, ALT_C)
-
-#define GPIO78_GPIO		PIN_CFG(78, GPIO)
-#define GPIO78_LCD_D8		PIN_CFG(78, ALT_A)
-#define GPIO78_KP_O6		PIN_CFG(78, ALT_B)
-#define GPIO78_IP_GPIO2		PIN_CFG(78, ALT_C)
-
-#define GPIO79_GPIO		PIN_CFG(79, GPIO)
-#define GPIO79_LCD_D9		PIN_CFG(79, ALT_A)
-#define GPIO79_KP_I6		PIN_CFG(79, ALT_B)
-#define GPIO79_IP_GPIO3		PIN_CFG(79, ALT_C)
-
-#define GPIO80_GPIO		PIN_CFG(80, GPIO)
-#define GPIO80_LCD_D10		PIN_CFG(80, ALT_A)
-#define GPIO80_KP_SKA0		PIN_CFG(80, ALT_B)
-#define GPIO80_IP_GPIO4		PIN_CFG(80, ALT_C)
-
-#define GPIO81_GPIO		PIN_CFG(81, GPIO)
-#define GPIO81_LCD_D11		PIN_CFG(81, ALT_A)
-#define GPIO81_KP_SKB0		PIN_CFG(81, ALT_B)
-#define GPIO81_IP_GPIO5		PIN_CFG(81, ALT_C)
-
-#define GPIO82_GPIO		PIN_CFG(82, GPIO)
-#define GPIO82_LCD_D12		PIN_CFG(82, ALT_A)
-#define GPIO82_KP_O5		PIN_CFG(82, ALT_B)
-
-#define GPIO83_GPIO		PIN_CFG(83, GPIO)
-#define GPIO83_LCD_D13		PIN_CFG(83, ALT_A)
-#define GPIO83_KP_O4		PIN_CFG(83, ALT_B)
-
-#define GPIO84_GPIO		PIN_CFG(84, GPIO)
-#define GPIO84_LCD_D14		PIN_CFG(84, ALT_A)
-#define GPIO84_KP_I5		PIN_CFG(84, ALT_B)
-
-#define GPIO85_GPIO		PIN_CFG(85, GPIO)
-#define GPIO85_LCD_D15		PIN_CFG(85, ALT_A)
-#define GPIO85_KP_I4		PIN_CFG(85, ALT_B)
-
-#define GPIO86_GPIO		PIN_CFG(86, GPIO)
-#define GPIO86_LCD_D16		PIN_CFG(86, ALT_A)
-#define GPIO86_SM_ADQ0		PIN_CFG(86, ALT_B)
-#define GPIO86_MC5_DAT0		PIN_CFG(86, ALT_C)
-
-#define GPIO87_GPIO		PIN_CFG(87, GPIO)
-#define GPIO87_LCD_D17		PIN_CFG(87, ALT_A)
-#define GPIO87_SM_ADQ1		PIN_CFG(87, ALT_B)
-#define GPIO87_MC5_DAT1		PIN_CFG(87, ALT_C)
-
-#define GPIO88_GPIO		PIN_CFG(88, GPIO)
-#define GPIO88_LCD_D18		PIN_CFG(88, ALT_A)
-#define GPIO88_SM_ADQ2		PIN_CFG(88, ALT_B)
-#define GPIO88_MC5_DAT2		PIN_CFG(88, ALT_C)
-
-#define GPIO89_GPIO		PIN_CFG(89, GPIO)
-#define GPIO89_LCD_D19		PIN_CFG(89, ALT_A)
-#define GPIO89_SM_ADQ3		PIN_CFG(89, ALT_B)
-#define GPIO89_MC5_DAT3		PIN_CFG(89, ALT_C)
-
-#define GPIO90_GPIO		PIN_CFG(90, GPIO)
-#define GPIO90_LCD_D20		PIN_CFG(90, ALT_A)
-#define GPIO90_SM_ADQ4		PIN_CFG(90, ALT_B)
-#define GPIO90_MC5_CMD		PIN_CFG(90, ALT_C)
-
-#define GPIO91_GPIO		PIN_CFG(91, GPIO)
-#define GPIO91_LCD_D21		PIN_CFG(91, ALT_A)
-#define GPIO91_SM_ADQ5		PIN_CFG(91, ALT_B)
-#define GPIO91_MC5_FBCLK	PIN_CFG(91, ALT_C)
-
-#define GPIO92_GPIO		PIN_CFG(92, GPIO)
-#define GPIO92_LCD_D22		PIN_CFG(92, ALT_A)
-#define GPIO92_SM_ADQ6		PIN_CFG(92, ALT_B)
-#define GPIO92_MC5_CLK		PIN_CFG(92, ALT_C)
-
-#define GPIO93_GPIO		PIN_CFG(93, GPIO)
-#define GPIO93_LCD_D23		PIN_CFG(93, ALT_A)
-#define GPIO93_SM_ADQ7		PIN_CFG(93, ALT_B)
-#define GPIO93_MC5_DAT4		PIN_CFG(93, ALT_C)
-
-#define GPIO94_GPIO		PIN_CFG(94, GPIO)
-#define GPIO94_KP_O7		PIN_CFG(94, ALT_A)
-#define GPIO94_SM_ADVn		PIN_CFG(94, ALT_B)
-#define GPIO94_MC5_DAT5		PIN_CFG(94, ALT_C)
-
-#define GPIO95_GPIO		PIN_CFG(95, GPIO)
-#define GPIO95_KP_I7		PIN_CFG(95, ALT_A)
-#define GPIO95_SM_CS0n		PIN_CFG(95, ALT_B)
-#define GPIO95_SM_PS0n		PIN_CFG(95, ALT_C)
-
-#define GPIO96_GPIO		PIN_CFG(96, GPIO)
-#define GPIO96_KP_O6		PIN_CFG(96, ALT_A)
-#define GPIO96_SM_OEn		PIN_CFG(96, ALT_B)
-#define GPIO96_MC5_DAT6		PIN_CFG(96, ALT_C)
-
-#define GPIO97_GPIO		PIN_CFG(97, GPIO)
-#define GPIO97_KP_I6		PIN_CFG(97, ALT_A)
-#define GPIO97_SM_WEn		PIN_CFG(97, ALT_B)
-#define GPIO97_MC5_DAT7		PIN_CFG(97, ALT_C)
-
-#define GPIO128_GPIO		PIN_CFG(128, GPIO)
-#define GPIO128_MC2_CLK		PIN_CFG_INPUT(128, ALT_A, PULLUP)
-#define GPIO128_SM_CKO		PIN_CFG(128, ALT_B)
-
-#define GPIO129_GPIO		PIN_CFG(129, GPIO)
-#define GPIO129_MC2_CMD		PIN_CFG_INPUT(129, ALT_A, PULLUP)
-#define GPIO129_SM_WAIT0n	PIN_CFG(129, ALT_B)
-
-#define GPIO130_GPIO		PIN_CFG(130, GPIO)
-#define GPIO130_MC2_FBCLK	PIN_CFG_INPUT(130, ALT_A, PULLUP)
-#define GPIO130_SM_FBCLK	PIN_CFG(130, ALT_B)
-#define GPIO130_MC2_RSTN	PIN_CFG(130, ALT_C)
-
-#define GPIO131_GPIO		PIN_CFG(131, GPIO)
-#define GPIO131_MC2_DAT0	PIN_CFG_INPUT(131, ALT_A, PULLUP)
-#define GPIO131_SM_ADQ8		PIN_CFG(131, ALT_B)
-
-#define GPIO132_GPIO		PIN_CFG(132, GPIO)
-#define GPIO132_MC2_DAT1	PIN_CFG_INPUT(132, ALT_A, PULLUP)
-#define GPIO132_SM_ADQ9		PIN_CFG(132, ALT_B)
-
-#define GPIO133_GPIO		PIN_CFG(133, GPIO)
-#define GPIO133_MC2_DAT2	PIN_CFG_INPUT(133, ALT_A, PULLUP)
-#define GPIO133_SM_ADQ10	PIN_CFG(133, ALT_B)
-
-#define GPIO134_GPIO		PIN_CFG(134, GPIO)
-#define GPIO134_MC2_DAT3	PIN_CFG_INPUT(134, ALT_A, PULLUP)
-#define GPIO134_SM_ADQ11	PIN_CFG(134, ALT_B)
-
-#define GPIO135_GPIO		PIN_CFG(135, GPIO)
-#define GPIO135_MC2_DAT4	PIN_CFG_INPUT(135, ALT_A, PULLUP)
-#define GPIO135_SM_ADQ12	PIN_CFG(135, ALT_B)
-
-#define GPIO136_GPIO		PIN_CFG(136, GPIO)
-#define GPIO136_MC2_DAT5	PIN_CFG_INPUT(136, ALT_A, PULLUP)
-#define GPIO136_SM_ADQ13	PIN_CFG(136, ALT_B)
-
-#define GPIO137_GPIO		PIN_CFG(137, GPIO)
-#define GPIO137_MC2_DAT6	PIN_CFG_INPUT(137, ALT_A, PULLUP)
-#define GPIO137_SM_ADQ14	PIN_CFG(137, ALT_B)
-
-#define GPIO138_GPIO		PIN_CFG(138, GPIO)
-#define GPIO138_MC2_DAT7	PIN_CFG_INPUT(138, ALT_A, PULLUP)
-#define GPIO138_SM_ADQ15	PIN_CFG(138, ALT_B)
-
-#define GPIO139_GPIO		PIN_CFG(139, GPIO)
-#define GPIO139_SSP1_RXD	PIN_CFG(139, ALT_A)
-#define GPIO139_SM_WAIT1n	PIN_CFG(139, ALT_B)
-#define GPIO139_KP_O8		PIN_CFG(139, ALT_C)
-
-#define GPIO140_GPIO		PIN_CFG(140, GPIO)
-#define GPIO140_SSP1_TXD	PIN_CFG(140, ALT_A)
-#define GPIO140_IP_GPIO7	PIN_CFG(140, ALT_B)
-#define GPIO140_KP_SKA1		PIN_CFG(140, ALT_C)
-
-#define GPIO141_GPIO		PIN_CFG(141, GPIO)
-#define GPIO141_SSP1_CLK	PIN_CFG(141, ALT_A)
-#define GPIO141_IP_GPIO2	PIN_CFG(141, ALT_B)
-#define GPIO141_KP_O9		PIN_CFG(141, ALT_C)
-
-#define GPIO142_GPIO		PIN_CFG(142, GPIO)
-#define GPIO142_SSP1_FRM	PIN_CFG(142, ALT_A)
-#define GPIO142_IP_GPIO3	PIN_CFG(142, ALT_B)
-#define GPIO142_KP_SKB1		PIN_CFG(142, ALT_C)
-
-#define GPIO143_GPIO		PIN_CFG(143, GPIO)
-#define GPIO143_SSP0_CLK	PIN_CFG(143, ALT_A)
-
-#define GPIO144_GPIO		PIN_CFG(144, GPIO)
-#define GPIO144_SSP0_FRM	PIN_CFG(144, ALT_A)
-
-#define GPIO145_GPIO		PIN_CFG(145, GPIO)
-#define GPIO145_SSP0_RXD	PIN_CFG(145, ALT_A)
-
-#define GPIO146_GPIO		PIN_CFG(146, GPIO)
-#define GPIO146_SSP0_TXD	PIN_CFG(146, ALT_A)
-
-#define GPIO147_GPIO		PIN_CFG(147, GPIO)
-#define GPIO147_I2C0_SCL	PIN_CFG(147, ALT_A)
-
-#define GPIO148_GPIO		PIN_CFG(148, GPIO)
-#define GPIO148_I2C0_SDA	PIN_CFG(148, ALT_A)
-
-#define GPIO149_GPIO		PIN_CFG(149, GPIO)
-#define GPIO149_IP_GPIO0	PIN_CFG(149, ALT_A)
-#define GPIO149_SM_CS1n		PIN_CFG(149, ALT_B)
-#define GPIO149_SM_PS1n		PIN_CFG(149, ALT_C)
-
-#define GPIO150_GPIO		PIN_CFG(150, GPIO)
-#define GPIO150_IP_GPIO1	PIN_CFG(150, ALT_A)
-#define GPIO150_LCDA_CLK	PIN_CFG(150, ALT_B)
-
-#define GPIO151_GPIO		PIN_CFG(151, GPIO)
-#define GPIO151_KP_SKA0		PIN_CFG(151, ALT_A)
-#define GPIO151_LCD_VSI0	PIN_CFG(151, ALT_B)
-#define GPIO151_KP_O8		PIN_CFG(151, ALT_C)
-
-#define GPIO152_GPIO		PIN_CFG(152, GPIO)
-#define GPIO152_KP_SKB0		PIN_CFG(152, ALT_A)
-#define GPIO152_LCD_VSI1	PIN_CFG(152, ALT_B)
-#define GPIO152_KP_O9		PIN_CFG(152, ALT_C)
-
-#define GPIO153_GPIO		PIN_CFG(153, GPIO)
-#define GPIO153_KP_I7		PIN_CFG(153, ALT_A)
-#define GPIO153_LCD_D24		PIN_CFG(153, ALT_B)
-#define GPIO153_U2_RXD		PIN_CFG(153, ALT_C)
-
-#define GPIO154_GPIO		PIN_CFG(154, GPIO)
-#define GPIO154_KP_I6		PIN_CFG(154, ALT_A)
-#define GPIO154_LCD_D25		PIN_CFG(154, ALT_B)
-#define GPIO154_U2_TXD		PIN_CFG(154, ALT_C)
-
-#define GPIO155_GPIO		PIN_CFG(155, GPIO)
-#define GPIO155_KP_I5		PIN_CFG(155, ALT_A)
-#define GPIO155_LCD_D26		PIN_CFG(155, ALT_B)
-#define GPIO155_STMAPE_CLK	PIN_CFG(155, ALT_C)
-
-#define GPIO156_GPIO		PIN_CFG(156, GPIO)
-#define GPIO156_KP_I4		PIN_CFG(156, ALT_A)
-#define GPIO156_LCD_D27		PIN_CFG(156, ALT_B)
-#define GPIO156_STMAPE_DAT3	PIN_CFG(156, ALT_C)
-
-#define GPIO157_GPIO		PIN_CFG(157, GPIO)
-#define GPIO157_KP_O7		PIN_CFG(157, ALT_A)
-#define GPIO157_LCD_D28		PIN_CFG(157, ALT_B)
-#define GPIO157_STMAPE_DAT2	PIN_CFG(157, ALT_C)
-
-#define GPIO158_GPIO		PIN_CFG(158, GPIO)
-#define GPIO158_KP_O6		PIN_CFG(158, ALT_A)
-#define GPIO158_LCD_D29		PIN_CFG(158, ALT_B)
-#define GPIO158_STMAPE_DAT1	PIN_CFG(158, ALT_C)
-
-#define GPIO159_GPIO		PIN_CFG(159, GPIO)
-#define GPIO159_KP_O5		PIN_CFG(159, ALT_A)
-#define GPIO159_LCD_D30		PIN_CFG(159, ALT_B)
-#define GPIO159_STMAPE_DAT0	PIN_CFG(159, ALT_C)
-
-#define GPIO160_GPIO		PIN_CFG(160, GPIO)
-#define GPIO160_KP_O4		PIN_CFG(160, ALT_A)
-#define GPIO160_LCD_D31		PIN_CFG(160, ALT_B)
-#define GPIO160_NONE		PIN_CFG(160, ALT_C)
-
-#define GPIO161_GPIO		PIN_CFG(161, GPIO)
-#define GPIO161_KP_I3		PIN_CFG(161, ALT_A)
-#define GPIO161_LCD_D32		PIN_CFG(161, ALT_B)
-#define GPIO161_UARTMOD_RXD	PIN_CFG(161, ALT_C)
-
-#define GPIO162_GPIO		PIN_CFG(162, GPIO)
-#define GPIO162_KP_I2		PIN_CFG(162, ALT_A)
-#define GPIO162_LCD_D33		PIN_CFG(162, ALT_B)
-#define GPIO162_UARTMOD_TXD	PIN_CFG(162, ALT_C)
-
-#define GPIO163_GPIO		PIN_CFG(163, GPIO)
-#define GPIO163_KP_I1		PIN_CFG(163, ALT_A)
-#define GPIO163_LCD_D34		PIN_CFG(163, ALT_B)
-#define GPIO163_STMMOD_CLK	PIN_CFG(163, ALT_C)
-
-#define GPIO164_GPIO		PIN_CFG(164, GPIO)
-#define GPIO164_KP_I0		PIN_CFG(164, ALT_A)
-#define GPIO164_LCD_D35		PIN_CFG(164, ALT_B)
-#define GPIO164_STMMOD_DAT3	PIN_CFG(164, ALT_C)
-
-#define GPIO165_GPIO		PIN_CFG(165, GPIO)
-#define GPIO165_KP_O3		PIN_CFG(165, ALT_A)
-#define GPIO165_LCD_D36		PIN_CFG(165, ALT_B)
-#define GPIO165_STMMOD_DAT2	PIN_CFG(165, ALT_C)
-
-#define GPIO166_GPIO		PIN_CFG(166, GPIO)
-#define GPIO166_KP_O2		PIN_CFG(166, ALT_A)
-#define GPIO166_LCD_D37		PIN_CFG(166, ALT_B)
-#define GPIO166_STMMOD_DAT1	PIN_CFG(166, ALT_C)
-
-#define GPIO167_GPIO		PIN_CFG(167, GPIO)
-#define GPIO167_KP_O1		PIN_CFG(167, ALT_A)
-#define GPIO167_LCD_D38		PIN_CFG(167, ALT_B)
-#define GPIO167_STMMOD_DAT0	PIN_CFG(167, ALT_C)
-
-#define GPIO168_GPIO		PIN_CFG(168, GPIO)
-#define GPIO168_KP_O0		PIN_CFG(168, ALT_A)
-#define GPIO168_LCD_D39		PIN_CFG(168, ALT_B)
-#define GPIO168_NONE		PIN_CFG(168, ALT_C)
-
-#define GPIO169_GPIO		PIN_CFG(169, GPIO)
-#define GPIO169_RF_PURn		PIN_CFG(169, ALT_A)
-#define GPIO169_LCDA_DE		PIN_CFG(169, ALT_B)
-#define GPIO169_USBSIM_PDC	PIN_CFG(169, ALT_C)
-
-#define GPIO170_GPIO		PIN_CFG(170, GPIO)
-#define GPIO170_MODEM_STATE	PIN_CFG(170, ALT_A)
-#define GPIO170_LCDA_VSO	PIN_CFG(170, ALT_B)
-#define GPIO170_KP_SKA1		PIN_CFG(170, ALT_C)
-
-#define GPIO171_GPIO		PIN_CFG(171, GPIO)
-#define GPIO171_MODEM_PWREN	PIN_CFG(171, ALT_A)
-#define GPIO171_LCDA_HSO	PIN_CFG(171, ALT_B)
-#define GPIO171_KP_SKB1		PIN_CFG(171, ALT_C)
-
-#define GPIO192_GPIO		PIN_CFG(192, GPIO)
-#define GPIO192_MSP2_SCK	PIN_CFG(192, ALT_A)
-
-#define GPIO193_GPIO		PIN_CFG(193, GPIO)
-#define GPIO193_MSP2_TXD	PIN_CFG(193, ALT_A)
-
-#define GPIO194_GPIO		PIN_CFG(194, GPIO)
-#define GPIO194_MSP2_TCK	PIN_CFG(194, ALT_A)
-
-#define GPIO195_GPIO		PIN_CFG(195, GPIO)
-#define GPIO195_MSP2_TFS	PIN_CFG(195, ALT_A)
-
-#define GPIO196_GPIO		PIN_CFG(196, GPIO)
-#define GPIO196_MSP2_RXD	PIN_CFG(196, ALT_A)
-
-#define GPIO197_GPIO		PIN_CFG(197, GPIO)
-#define GPIO197_MC4_DAT3	PIN_CFG_INPUT(197, ALT_A, PULLUP)
-
-#define GPIO198_GPIO		PIN_CFG(198, GPIO)
-#define GPIO198_MC4_DAT2	PIN_CFG_INPUT(198, ALT_A, PULLUP)
-
-#define GPIO199_GPIO		PIN_CFG(199, GPIO)
-#define GPIO199_MC4_DAT1	PIN_CFG_INPUT(199, ALT_A, PULLUP)
-
-#define GPIO200_GPIO		PIN_CFG(200, GPIO)
-#define GPIO200_MC4_DAT0	PIN_CFG_INPUT(200, ALT_A, PULLUP)
-
-#define GPIO201_GPIO		PIN_CFG(201, GPIO)
-#define GPIO201_MC4_CMD		PIN_CFG_INPUT(201, ALT_A, PULLUP)
-
-#define GPIO202_GPIO		PIN_CFG(202, GPIO)
-#define GPIO202_MC4_FBCLK	PIN_CFG_INPUT(202, ALT_A, PULLUP)
-#define GPIO202_PWL		PIN_CFG(202, ALT_B)
-#define GPIO202_MC4_RSTN	PIN_CFG(202, ALT_C)
-
-#define GPIO203_GPIO		PIN_CFG(203, GPIO)
-#define GPIO203_MC4_CLK		PIN_CFG_INPUT(203, ALT_A, PULLUP)
-
-#define GPIO204_GPIO		PIN_CFG(204, GPIO)
-#define GPIO204_MC4_DAT7	PIN_CFG_INPUT(204, ALT_A, PULLUP)
-
-#define GPIO205_GPIO		PIN_CFG(205, GPIO)
-#define GPIO205_MC4_DAT6	PIN_CFG_INPUT(205, ALT_A, PULLUP)
-
-#define GPIO206_GPIO		PIN_CFG(206, GPIO)
-#define GPIO206_MC4_DAT5	PIN_CFG_INPUT(206, ALT_A, PULLUP)
-
-#define GPIO207_GPIO		PIN_CFG(207, GPIO)
-#define GPIO207_MC4_DAT4	PIN_CFG_INPUT(207, ALT_A, PULLUP)
-
-#define GPIO208_GPIO		PIN_CFG(208, GPIO)
-#define GPIO208_MC1_CLK		PIN_CFG(208, ALT_A)
-
-#define GPIO209_GPIO		PIN_CFG(209, GPIO)
-#define GPIO209_MC1_FBCLK	PIN_CFG(209, ALT_A)
-#define GPIO209_SPI1_CLK	PIN_CFG(209, ALT_B)
-
-#define GPIO210_GPIO		PIN_CFG(210, GPIO)
-#define GPIO210_MC1_CMD		PIN_CFG(210, ALT_A)
-
-#define GPIO211_GPIO		PIN_CFG(211, GPIO)
-#define GPIO211_MC1_DAT0	PIN_CFG(211, ALT_A)
-
-#define GPIO212_GPIO		PIN_CFG(212, GPIO)
-#define GPIO212_MC1_DAT1	PIN_CFG(212, ALT_A)
-#define GPIO212_SPI1_FRM	PIN_CFG(212, ALT_B)
-
-#define GPIO213_GPIO		PIN_CFG(213, GPIO)
-#define GPIO213_MC1_DAT2	PIN_CFG(213, ALT_A)
-#define GPIO213_SPI1_TXD	PIN_CFG(213, ALT_B)
-
-#define GPIO214_GPIO		PIN_CFG(214, GPIO)
-#define GPIO214_MC1_DAT3	PIN_CFG(214, ALT_A)
-#define GPIO214_SPI1_RXD	PIN_CFG(214, ALT_B)
-
-#define GPIO215_GPIO		PIN_CFG(215, GPIO)
-#define GPIO215_MC1_CMDDIR	PIN_CFG(215, ALT_A)
-#define GPIO215_MC3_DAT2DIR	PIN_CFG(215, ALT_B)
-#define GPIO215_CLKOUT1		PIN_CFG(215, ALT_C)
-#define GPIO215_SPI2_TXD	PIN_CFG(215, ALT_C)
-
-#define GPIO216_GPIO		PIN_CFG(216, GPIO)
-#define GPIO216_MC1_DAT2DIR	PIN_CFG(216, ALT_A)
-#define GPIO216_MC3_CMDDIR	PIN_CFG(216, ALT_B)
-#define GPIO216_I2C3_SDA	PIN_CFG(216, ALT_C)
-#define GPIO216_SPI2_FRM	PIN_CFG(216, ALT_C)
-
-#define GPIO217_GPIO		PIN_CFG(217, GPIO)
-#define GPIO217_MC1_DAT0DIR	PIN_CFG(217, ALT_A)
-#define GPIO217_MC3_DAT31DIR	PIN_CFG(217, ALT_B)
-#define GPIO217_CLKOUT2		PIN_CFG(217, ALT_C)
-#define GPIO217_SPI2_CLK	PIN_CFG(217, ALT_C)
-
-#define GPIO218_GPIO		PIN_CFG(218, GPIO)
-#define GPIO218_MC1_DAT31DIR	PIN_CFG(218, ALT_A)
-#define GPIO218_MC3_DAT0DIR	PIN_CFG(218, ALT_B)
-#define GPIO218_I2C3_SCL	PIN_CFG(218, ALT_C)
-#define GPIO218_SPI2_RXD	PIN_CFG(218, ALT_C)
-
-#define GPIO219_GPIO		PIN_CFG(219, GPIO)
-#define GPIO219_HSIR_FLA0	PIN_CFG(219, ALT_A)
-#define GPIO219_MC3_CLK		PIN_CFG(219, ALT_B)
-
-#define GPIO220_GPIO		PIN_CFG(220, GPIO)
-#define GPIO220_HSIR_DAT0	PIN_CFG(220, ALT_A)
-#define GPIO220_MC3_FBCLK	PIN_CFG(220, ALT_B)
-#define GPIO220_SPI0_CLK	PIN_CFG(220, ALT_C)
-
-#define GPIO221_GPIO		PIN_CFG(221, GPIO)
-#define GPIO221_HSIR_RDY0	PIN_CFG(221, ALT_A)
-#define GPIO221_MC3_CMD		PIN_CFG(221, ALT_B)
-
-#define GPIO222_GPIO		PIN_CFG(222, GPIO)
-#define GPIO222_HSIT_FLA0	PIN_CFG(222, ALT_A)
-#define GPIO222_MC3_DAT0	PIN_CFG(222, ALT_B)
-
-#define GPIO223_GPIO		PIN_CFG(223, GPIO)
-#define GPIO223_HSIT_DAT0	PIN_CFG(223, ALT_A)
-#define GPIO223_MC3_DAT1	PIN_CFG(223, ALT_B)
-#define GPIO223_SPI0_FRM	PIN_CFG(223, ALT_C)
-
-#define GPIO224_GPIO		PIN_CFG(224, GPIO)
-#define GPIO224_HSIT_RDY0	PIN_CFG(224, ALT_A)
-#define GPIO224_MC3_DAT2	PIN_CFG(224, ALT_B)
-#define GPIO224_SPI0_TXD	PIN_CFG(224, ALT_C)
-
-#define GPIO225_GPIO		PIN_CFG(225, GPIO)
-#define GPIO225_HSIT_CAWAKE0	PIN_CFG(225, ALT_A)
-#define GPIO225_MC3_DAT3	PIN_CFG(225, ALT_B)
-#define GPIO225_SPI0_RXD	PIN_CFG(225, ALT_C)
-
-#define GPIO226_GPIO		PIN_CFG(226, GPIO)
-#define GPIO226_HSIT_ACWAKE0	PIN_CFG(226, ALT_A)
-#define GPIO226_PWL		PIN_CFG(226, ALT_B)
-#define GPIO226_USBSIM_PDC	PIN_CFG(226, ALT_C)
-
-#define GPIO227_GPIO		PIN_CFG(227, GPIO)
-#define GPIO227_CLKOUT1		PIN_CFG(227, ALT_A)
-
-#define GPIO228_GPIO		PIN_CFG(228, GPIO)
-#define GPIO228_CLKOUT2		PIN_CFG(228, ALT_A)
-
-#define GPIO229_GPIO		PIN_CFG(229, GPIO)
-#define GPIO229_CLKOUT1		PIN_CFG(229, ALT_A)
-#define GPIO229_PWL		PIN_CFG(229, ALT_B)
-#define GPIO229_I2C3_SDA	PIN_CFG(229, ALT_C)
-
-#define GPIO230_GPIO		PIN_CFG(230, GPIO)
-#define GPIO230_CLKOUT2		PIN_CFG(230, ALT_A)
-#define GPIO230_PWL		PIN_CFG(230, ALT_B)
-#define GPIO230_I2C3_SCL	PIN_CFG(230, ALT_C)
-
-#define GPIO256_GPIO		PIN_CFG(256, GPIO)
-#define GPIO256_USB_NXT		PIN_CFG(256, ALT_A)
-
-#define GPIO257_GPIO		PIN_CFG(257, GPIO)
-#define GPIO257_USB_STP		PIN_CFG(257, ALT_A)
-
-#define GPIO258_GPIO		PIN_CFG(258, GPIO)
-#define GPIO258_USB_XCLK	PIN_CFG(258, ALT_A)
-#define GPIO258_NONE		PIN_CFG(258, ALT_B)
-#define GPIO258_DDR_TRIG	PIN_CFG(258, ALT_C)
-
-#define GPIO259_GPIO		PIN_CFG(259, GPIO)
-#define GPIO259_USB_DIR		PIN_CFG(259, ALT_A)
-
-#define GPIO260_GPIO		PIN_CFG(260, GPIO)
-#define GPIO260_USB_DAT7	PIN_CFG(260, ALT_A)
-
-#define GPIO261_GPIO		PIN_CFG(261, GPIO)
-#define GPIO261_USB_DAT6	PIN_CFG(261, ALT_A)
-
-#define GPIO262_GPIO		PIN_CFG(262, GPIO)
-#define GPIO262_USB_DAT5	PIN_CFG(262, ALT_A)
-
-#define GPIO263_GPIO		PIN_CFG(263, GPIO)
-#define GPIO263_USB_DAT4	PIN_CFG(263, ALT_A)
-
-#define GPIO264_GPIO		PIN_CFG(264, GPIO)
-#define GPIO264_USB_DAT3	PIN_CFG(264, ALT_A)
-
-#define GPIO265_GPIO		PIN_CFG(265, GPIO)
-#define GPIO265_USB_DAT2	PIN_CFG(265, ALT_A)
-
-#define GPIO266_GPIO		PIN_CFG(266, GPIO)
-#define GPIO266_USB_DAT1	PIN_CFG(266, ALT_A)
-
-#define GPIO267_GPIO		PIN_CFG(267, GPIO)
-#define GPIO267_USB_DAT0	PIN_CFG(267, ALT_A)
-
-#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 2b7c93a..7aeb5d6 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -18,6 +18,7 @@
 #include <linux/of_address.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
+#include <linux/irqchip/arm-gic.h>
 
 #include <asm/mcpm.h>
 #include <asm/proc-fns.h>
@@ -230,6 +231,7 @@
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
+	gic_cpu_if_down();
 	tc2_pm_down(residency);
 }
 
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 66781bf3..54ee616 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -56,3 +56,8 @@
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
+
+int pmd_huge_support(void)
+{
+	return 1;
+}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7b0cb3b..febaee7 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -78,7 +78,7 @@
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 #ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	phys_initrd_start = start;
 	phys_initrd_size = end - start;
@@ -208,7 +208,7 @@
 
 #ifdef CONFIG_ZONE_DMA
 
-unsigned long arm_dma_zone_size __read_mostly;
+phys_addr_t arm_dma_zone_size __read_mostly;
 EXPORT_SYMBOL(arm_dma_zone_size);
 
 /*
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8a6295c..83e4f95 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -21,6 +21,8 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
+#include <linux/cpuidle.h>
+#include <linux/cpufreq.h>
 
 #include <linux/mm.h>
 
@@ -267,18 +269,28 @@
 	if (!xen_initial_domain())
 		xenbus_probe(NULL);
 
+	/*
+	 * Make sure board-specific code will not set up ops for
+	 * cpuidle and cpufreq.
+	 */
+	disable_cpuidle();
+	disable_cpufreq();
+
 	return 0;
 }
 core_initcall(xen_guest_init);
 
 static int __init xen_pm_init(void)
 {
+	if (!xen_domain())
+		return -ENODEV;
+
 	pm_power_off = xen_power_off;
 	arm_pm_restart = xen_restart;
 
 	return 0;
 }
-subsys_initcall(xen_pm_init);
+late_initcall(xen_pm_init);
 
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index bca4c1c..12ad8f3 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -190,11 +190,6 @@
 	memblock_add(base, size);
 }
 
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
-	return __va(memblock_alloc(size, align));
-}
-
 /*
  * Limit the memory size that was specified via FDT.
  */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2fc8258..5e9aec3 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,6 +54,11 @@
 	return !(pud_val(pud) & PUD_TABLE_BIT);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 67e8d7c..de2de5d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -44,8 +44,7 @@
 
 phys_addr_t memstart_addr __read_mostly = 0;
 
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	phys_initrd_start = start;
 	phys_initrd_size = end - start;
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
index bdb56f0..9e15ab9 100644
--- a/arch/c6x/kernel/devicetree.c
+++ b/arch/c6x/kernel/devicetree.c
@@ -33,8 +33,7 @@
 
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-		unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (unsigned long)__va(start);
 	initrd_end = (unsigned long)__va(end);
@@ -46,8 +45,3 @@
 {
 	c6x_add_memory(base, size);
 }
-
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
-	return __va(memblock_alloc(size, align));
-}
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 3201ddb..c699d32 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -99,9 +99,6 @@
 	help
 	  Enable module allocation with kmalloc instead of vmalloc.
 
-config OOM_REBOOT
-       bool "Enable reboot at out of memory"
-
 source "kernel/Kconfig.preempt"
 
 source mm/Kconfig
@@ -175,12 +172,6 @@
 	help
 	  Width in bytes of the NOR Flash bus (1, 2 or 4). Is usually 2.
 
-config ETRAX_NANDFLASH_BUSWIDTH
-	int "Buswidth of NAND flash in bytes"
-	default "1"
-	help
-	  Width in bytes of the NAND flash (1 or 2).
-
 config ETRAX_FLASH1_SIZE
        int "FLASH1 size (dec, in MB. 0 = Unknown)"
        default "0"
@@ -272,38 +263,6 @@
 	  This option enables MTD mapping of flash devices.  Needed to use
 	  flash memories.  If unsure, say Y.
 
-config ETRAX_RTC
-	bool "Real Time Clock support"
-	depends on ETRAX_I2C
-	help
-	  Enables drivers for the Real-Time Clock battery-backed chips on
-	  some products. The kernel reads the time when booting, and
-	  the date can be set using ioctl(fd, RTC_SET_TIME, &rt) with rt a
-	  rtc_time struct (see <file:arch/cris/include/asm/rtc.h>) on the
-	  /dev/rtc device.  You can check the time with cat /proc/rtc, but
-	  normal time reading should be done using libc function time and
-	  friends.
-
-choice
-	prompt "RTC chip"
-	depends on ETRAX_RTC
-	default ETRAX_DS1302
-
-config ETRAX_DS1302
-	depends on ETRAX_ARCH_V10
-	bool "DS1302"
-	help
-	  Enables the driver for the DS1302 Real-Time Clock battery-backed
-	  chip on some products.
-
-config ETRAX_PCF8563
-	bool "PCF8563"
-	help
-	  Enables the driver for the PCF8563 Real-Time Clock battery-backed
-	  chip on some products.
-
-endchoice
-
 config ETRAX_SYNCHRONOUS_SERIAL
 	bool "Synchronous serial-port support"
 	help
@@ -578,26 +537,6 @@
 	depends on ETRAX_ARCH_V10
 	bool "DMA 5"
 
-config ETRAX_SERIAL_PORT3_DMA9_IN
-	bool "Ser3 uses DMA9 for input"
-	depends on ETRAXFS
-	help
-	  Enables the DMA9 input channel for ser3 (ttyS3).
-	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiving data.
-	  Normally you want to use DMA, unless you use the DMA channel for
-	  something else.
-
-config ETRAX_SERIAL_PORT3_DMA3_IN
-	bool "Ser3 uses DMA3 for input"
-	depends on CRIS_MACH_ARTPEC3
-	help
-	  Enables the DMA3 input channel for ser3 (ttyS3).
-	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiving data.
-	  Normally you want to use DMA, unless you use the DMA channel for
-	  something else.
-
 endchoice
 
 choice
@@ -615,26 +554,6 @@
 	depends on ETRAX_ARCH_V10
 	bool "DMA 4"
 
-config ETRAX_SERIAL_PORT3_DMA8_OUT
-	bool "Ser3 uses DMA8 for output"
-	depends on ETRAXFS
-	help
-	  Enables the DMA8 output channel for ser3 (ttyS3).
-	  If you do not enable DMA, an interrupt for each character will be
-	  used when transmitting data.
-	  Normally you want to use DMA, unless you use the DMA channel for
-	  something else.
-
-config ETRAX_SERIAL_PORT3_DMA2_OUT
-	bool "Ser3 uses DMA2 for output"
-	depends on CRIS_MACH_ARTPEC3
-	help
-	  Enables the DMA2 output channel for ser3 (ttyS3).
-	  If you do not enable DMA, an interrupt for each character will be
-	  used when transmitting data.
-	  Normally you want to use DMA, unless you use the DMA channel for
-	  something else.
-
 endchoice
 
 endmenu
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index daf5f19..239dab0 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -417,16 +417,6 @@
 	   for CTRL and BULK traffic only, INTR traffic may work as well
 	   however (depending on the requirements of timeliness).
 
-config ETRAX_USB_HOST_PORT1
-	bool "USB port 1 enabled"
-	depends on ETRAX_USB_HOST
-	default n
-
-config ETRAX_USB_HOST_PORT2
-	bool "USB port 2 enabled"
-	depends on ETRAX_USB_HOST
-	default n
-
 config ETRAX_PTABLE_SECTOR
 	int "Byte-offset of partition table sector"
 	depends on ETRAX_AXISFLASHMAP
@@ -527,19 +517,6 @@
 	  Remember that you need to setup the port directions appropriately in
 	  the General configuration.
 
-config ETRAX_PA_BUTTON_BITMASK
-	hex "PA-buttons bitmask"
-	depends on ETRAX_GPIO
-	default "02"
-	help
-	  This is a bitmask with information about what bits on PA that
-	  are used for buttons.
-	  Most products has a so called TEST button on PA1, if that's true
-	  use 02 here.
-	  Use 00 if there are no buttons on PA.
-	  If the bitmask is <> 00 a button driver will be included in the gpio
-	  driver. ETRAX general I/O support must be enabled.
-
 config ETRAX_PA_CHANGEABLE_DIR
 	hex "PA user changeable dir mask"
 	depends on ETRAX_GPIO
@@ -580,51 +557,4 @@
 	  Bit set = changeable.
 	  You probably want 00 here.
 
-config ETRAX_DS1302_RST_ON_GENERIC_PORT
-	bool "DS1302 RST on Generic Port"
-	depends on ETRAX_DS1302
-	help
-	  If your product has the RST signal line for the DS1302 RTC on the
-	  Generic Port then say Y here, otherwise leave it as N in which
-	  case the RST signal line is assumed to be connected to Port PB
-	  (just like the SCL and SDA lines).
-
-config ETRAX_DS1302_RSTBIT
-	int "DS1302 RST bit number"
-	depends on ETRAX_DS1302
-	default "2"
-	help
-	  This is the bit number for the RST signal line of the DS1302 RTC on
-	  the selected port. If you have selected the generic port then it
-	  should be bit 27, otherwise your best bet is bit 5.
-
-config ETRAX_DS1302_SCLBIT
-	int "DS1302 SCL bit number"
-	depends on ETRAX_DS1302
-	default "1"
-	help
-	  This is the bit number for the SCL signal line of the DS1302 RTC on
-	  Port PB. This is probably best left at 3.
-
-config ETRAX_DS1302_SDABIT
-	int "DS1302 SDA bit number"
-	depends on ETRAX_DS1302
-	default "0"
-	help
-	  This is the bit number for the SDA signal line of the DS1302 RTC on
-	  Port PB. This is probably best left at 2.
-
-config ETRAX_DS1302_TRICKLE_CHARGE
-	int "DS1302 Trickle charger value"
-	depends on ETRAX_DS1302
-	default "0"
-	help
-	  This controls the initial value of the trickle charge register.
-	  0 = disabled (use this if you are unsure or have a non rechargeable battery)
-	  Otherwise the following values can be OR:ed together to control the
-	  charge current:
-	  1 = 2kohm, 2 = 4kohm, 3 = 4kohm
-	  4 = 1 diode, 8 = 2 diodes
-	  Allowed values are (increasing current): 0, 11, 10, 9, 7, 6, 5
-
 endif
diff --git a/arch/cris/arch-v10/drivers/Makefile b/arch/cris/arch-v10/drivers/Makefile
index 44bf2e8..e5c1318 100644
--- a/arch/cris/arch-v10/drivers/Makefile
+++ b/arch/cris/arch-v10/drivers/Makefile
@@ -6,7 +6,5 @@
 obj-$(CONFIG_ETRAX_I2C)			+= i2c.o
 obj-$(CONFIG_ETRAX_I2C_EEPROM)		+= eeprom.o
 obj-$(CONFIG_ETRAX_GPIO)		+= gpio.o
-obj-$(CONFIG_ETRAX_DS1302)		+= ds1302.o
-obj-$(CONFIG_ETRAX_PCF8563)		+= pcf8563.o
 obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL)	+= sync_serial.o
 
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 1d866d3..6792503 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -19,64 +19,6 @@
 	  switch. This option should normally be disabled. If enabled,
 	  speed and duplex will be locked to 100 Mbit and full duplex.
 
-config ETRAX_ETHERNET_IFACE0
-	depends on ETRAX_ETHERNET
-	bool "Enable network interface 0"
-
-config ETRAX_ETHERNET_IFACE1
-	depends on (ETRAX_ETHERNET && ETRAXFS)
-	bool "Enable network interface 1 (uses DMA6 and DMA7)"
-
-config ETRAX_ETHERNET_GBIT
-	depends on (ETRAX_ETHERNET && CRIS_MACH_ARTPEC3)
-	bool "Enable gigabit Ethernet support"
-
-choice
-	prompt "Eth0 led group"
-	depends on ETRAX_ETHERNET_IFACE0
-	default ETRAX_ETH0_USE_LEDGRP0
-
-config ETRAX_ETH0_USE_LEDGRP0
-	bool "Use LED grp 0"
-	depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
-	help
-	  Use LED grp 0 for eth0
-
-config ETRAX_ETH0_USE_LEDGRP1
-	bool "Use LED grp 1"
-	depends on ETRAX_NBR_LED_GRP_TWO
-	help
-	  Use LED grp 1 for eth0
-
-config ETRAX_ETH0_USE_LEDGRPNULL
-	bool "Use no LEDs for eth0"
-	help
-	  Use no LEDs for eth0
-endchoice
-
-choice
-	prompt "Eth1 led group"
-	depends on ETRAX_ETHERNET_IFACE1
-	default ETRAX_ETH1_USE_LEDGRP1
-
-config ETRAX_ETH1_USE_LEDGRP0
-	bool "Use LED grp 0"
-	depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
-	help
-	  Use LED grp 0 for eth1
-
-config ETRAX_ETH1_USE_LEDGRP1
-	bool "Use LED grp 1"
-	depends on ETRAX_NBR_LED_GRP_TWO
-	help
-	  Use LED grp 1 for eth1
-
-config ETRAX_ETH1_USE_LEDGRPNULL
-	bool "Use no LEDs for eth1"
-	help
-	  Use no LEDs for eth1
-endchoice
-
 config ETRAXFS_SERIAL
 	bool "Serial-port support"
 	depends on ETRAX_ARCH_V32
@@ -108,261 +50,24 @@
 	  if you do not need DMA to something else.
 	  ser0 can use dma4 or dma6 for output and dma5 or dma7 for input.
 
-choice
-	prompt "Ser0 default port type "
-	depends on ETRAX_SERIAL_PORT0
-	default ETRAX_SERIAL_PORT0_TYPE_232
-	help
-	  Type of serial port.
-
-config ETRAX_SERIAL_PORT0_TYPE_232
-	bool "Ser0 is a RS-232 port"
-	help
-	  Configure serial port 0 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT0_TYPE_485HD
-	bool "Ser0 is a half duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 0 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT0_TYPE_485FD
-	bool "Ser0 is a full duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 0 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER0_DTR_BIT
-	string "Ser 0 DTR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_RI_BIT
-	string "Ser 0 RI bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_DSR_BIT
-	string "Ser 0 DSR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_CD_BIT
-	string "Ser 0 CD bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT0
-
 config ETRAX_SERIAL_PORT1
 	bool "Serial port 1 enabled"
 	depends on ETRAXFS_SERIAL
 	help
 	  Enables the ETRAX FS serial driver for ser1 (ttyS1).
 
-choice
-	prompt "Ser1 default port type"
-	depends on ETRAX_SERIAL_PORT1
-	default ETRAX_SERIAL_PORT1_TYPE_232
-	help
-	  Type of serial port.
-
-config ETRAX_SERIAL_PORT1_TYPE_232
-	bool "Ser1 is a RS-232 port"
-	help
-	  Configure serial port 1 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT1_TYPE_485HD
-	bool "Ser1 is a half duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 1 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT1_TYPE_485FD
-	bool "Ser1 is a full duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 1 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER1_DTR_BIT
-	string "Ser 1 DTR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_RI_BIT
-	string "Ser 1 RI bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_DSR_BIT
-	string "Ser 1 DSR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_CD_BIT
-	string "Ser 1 CD bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT1
-
 config ETRAX_SERIAL_PORT2
 	bool "Serial port 2 enabled"
 	depends on ETRAXFS_SERIAL
 	help
 	  Enables the ETRAX FS serial driver for ser2 (ttyS2).
 
-choice
-	prompt "Ser2 default port type"
-	depends on ETRAX_SERIAL_PORT2
-	default ETRAX_SERIAL_PORT2_TYPE_232
-	help
-	  What DMA channel to use for ser2
-
-config ETRAX_SERIAL_PORT2_TYPE_232
-	bool "Ser2 is a RS-232 port"
-	help
-	  Configure serial port 2 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT2_TYPE_485HD
-	bool "Ser2 is a half duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 2 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT2_TYPE_485FD
-	bool "Ser2 is a full duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 2 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-
-config ETRAX_SER2_DTR_BIT
-	string "Ser 2 DTR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_RI_BIT
-	string "Ser 2 RI bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_DSR_BIT
-	string "Ser 2 DSR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_CD_BIT
-	string "Ser 2 CD bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT2
-
 config ETRAX_SERIAL_PORT3
 	bool "Serial port 3 enabled"
 	depends on ETRAXFS_SERIAL
 	help
 	  Enables the ETRAX FS serial driver for ser3 (ttyS3).
 
-choice
-	prompt "Ser3 default port type"
-	depends on ETRAX_SERIAL_PORT3
-	default ETRAX_SERIAL_PORT3_TYPE_232
-	help
-	  What DMA channel to use for ser3.
-
-config ETRAX_SERIAL_PORT3_TYPE_232
-	bool "Ser3 is a RS-232 port"
-	help
-	  Configure serial port 3 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT3_TYPE_485HD
-	bool "Ser3 is a half duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 3 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT3_TYPE_485FD
-	bool "Ser3 is a full duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 3 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER3_DTR_BIT
-	string "Ser 3 DTR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_RI_BIT
-	string "Ser 3 RI bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_DSR_BIT
-	string "Ser 3 DSR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_CD_BIT
-	string "Ser 3 CD bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SERIAL_PORT4
-	bool "Serial port 4 enabled"
-	depends on ETRAXFS_SERIAL && CRIS_MACH_ARTPEC3
-	help
-	  Enables the ETRAX FS serial driver for ser4 (ttyS4).
-
-choice
-	prompt "Ser4 default port type"
-	depends on ETRAX_SERIAL_PORT4
-	default ETRAX_SERIAL_PORT4_TYPE_232
-	help
-	  What DMA channel to use for ser4.
-
-config ETRAX_SERIAL_PORT4_TYPE_232
-	bool "Ser4 is a RS-232 port"
-	help
-	  Configure serial port 4 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT4_TYPE_485HD
-	bool "Ser4 is a half duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 4 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT4_TYPE_485FD
-	bool "Ser4 is a full duplex RS-485 port"
-	depends on ETRAX_RS485
-	help
-	  Configure serial port 4 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-choice
-	prompt "Ser4 DMA in channel "
-	depends on ETRAX_SERIAL_PORT4
-	default ETRAX_SERIAL_PORT4_NO_DMA_IN
-	help
-	  What DMA channel to use for ser4.
-
-
-config ETRAX_SERIAL_PORT4_NO_DMA_IN
-	bool "Ser4 uses no DMA for input"
-	help
-	  Do not use DMA for ser4 input.
-
-config ETRAX_SERIAL_PORT4_DMA9_IN
-	bool "Ser4 uses DMA9 for input"
-	depends on ETRAX_SERIAL_PORT4
-	help
-	  Enables the DMA9 input channel for ser4 (ttyS4).
-	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiving data.
-	  Normally you want to use DMA, unless you use the DMA channel for
-	  something else.
-
-endchoice
-
-config ETRAX_SER4_DTR_BIT
-	string "Ser 4 DTR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_RI_BIT
-	string "Ser 4 RI bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_DSR_BIT
-	string "Ser 4 DSR bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_CD_BIT
-	string "Ser 4 CD bit (empty = not used)"
-	depends on ETRAX_SERIAL_PORT4
-
 config ETRAX_SYNCHRONOUS_SERIAL
 	bool "Synchronous serial-port support"
 	depends on ETRAX_ARCH_V32
@@ -703,32 +408,6 @@
 	  want to build it as a module, which will be named spi_crisv32_sser.
 	  (You need to select MMC separately.)
 
-config ETRAX_SPI_SSER0_DMA
-	bool "DMA for SPI on sser0 enabled"
-	depends on ETRAX_SPI_SSER0
-	depends on !ETRAX_SERIAL_PORT1_DMA4_OUT && !ETRAX_SERIAL_PORT1_DMA5_IN
-	default y
-	help
-	  Say Y if using DMA (dma4/dma5) for SPI on synchronous serial port 0.
-
-config ETRAX_SPI_MMC_CD_SSER0_PIN
-	string "MMC/SD card detect pin for SPI on sser0"
-	depends on ETRAX_SPI_SSER0 && MMC_SPI
-	default "pd11"
-	help
-	  The pin to use for SD/MMC card detect.  This pin should be pulled up
-	  and grounded when a card is present.  If defined as " " (space), no
-	  pin is selected.  A card must then always be inserted for proper
-	  action.
-
-config ETRAX_SPI_MMC_WP_SSER0_PIN
-	string "MMC/SD card write-protect pin for SPI on sser0"
-	depends on ETRAX_SPI_SSER0 && MMC_SPI
-	default "pd10"
-	help
-	  The pin to use for the SD/MMC write-protect signal for a memory
-	  card.  If defined as " " (space), the card is considered writable.
-
 config ETRAX_SPI_SSER1
 	tristate "SPI using synchronous serial port 1 (sser1)"
 	depends on ETRAX_SPI_MMC
@@ -742,32 +421,6 @@
 	  want to build it as a module, which will be named spi_crisv32_sser.
 	  (You need to select MMC separately.)
 
-config ETRAX_SPI_SSER1_DMA
-	bool "DMA for SPI on sser1 enabled"
-	depends on ETRAX_SPI_SSER1 && !ETRAX_ETHERNET_IFACE1
-	depends on !ETRAX_SERIAL_PORT0_DMA6_OUT && !ETRAX_SERIAL_PORT0_DMA7_IN
-	default y
-	help
-	  Say Y if using DMA (dma6/dma7) for SPI on synchronous serial port 1.
-
-config ETRAX_SPI_MMC_CD_SSER1_PIN
-	string "MMC/SD card detect pin for SPI on sser1"
-	depends on ETRAX_SPI_SSER1 && MMC_SPI
-	default "pd12"
-	help
-	  The pin to use for SD/MMC card detect.  This pin should be pulled up
-	  and grounded when a card is present.  If defined as " " (space), no
-	  pin is selected.  A card must then always be inserted for proper
-	  action.
-
-config ETRAX_SPI_MMC_WP_SSER1_PIN
-	string "MMC/SD card write-protect pin for SPI on sser1"
-	depends on ETRAX_SPI_SSER1 && MMC_SPI
-	default "pd9"
-	help
-	  The pin to use for the SD/MMC write-protect signal for a memory
-	  card.  If defined as " " (space), the card is considered writable.
-
 config ETRAX_SPI_GPIO
 	tristate "Bitbanged SPI using gpio pins"
 	depends on ETRAX_SPI_MMC
@@ -782,51 +435,4 @@
 	  Say m to build it as a module, which will be called spi_crisv32_gpio.
 	  (You need to select MMC separately.)
 
-# The default match that of sser0, only because that's how it was tested.
-config ETRAX_SPI_CS_PIN
-	string "SPI chip select pin"
-	depends on ETRAX_SPI_GPIO
-	default "pc3"
-	help
-	  The pin to use for SPI chip select.
-
-config ETRAX_SPI_CLK_PIN
-	string "SPI clock pin"
-	depends on ETRAX_SPI_GPIO
-	default "pc1"
-	help
-	  The pin to use for the SPI clock.
-
-config ETRAX_SPI_DATAIN_PIN
-	string "SPI MISO (data in) pin"
-	depends on ETRAX_SPI_GPIO
-	default "pc16"
-	help
-	  The pin to use for SPI data in from the device.
-
-config ETRAX_SPI_DATAOUT_PIN
-	string "SPI MOSI (data out) pin"
-	depends on ETRAX_SPI_GPIO
-	default "pc0"
-	help
-	  The pin to use for SPI data out to the device.
-
-config ETRAX_SPI_MMC_CD_GPIO_PIN
-	string "MMC/SD card detect pin for SPI using gpio (space for none)"
-	depends on ETRAX_SPI_GPIO && MMC_SPI
-	default "pd11"
-	help
-	  The pin to use for SD/MMC card detect.  This pin should be pulled up
-	  and grounded when a card is present.  If defined as " " (space), no
-	  pin is selected.  A card must then always be inserted for proper
-	  action.
-
-config ETRAX_SPI_MMC_WP_GPIO_PIN
-	string "MMC/SD card write-protect pin for SPI using gpio (space for none)"
-	depends on ETRAX_SPI_GPIO && MMC_SPI
-	default "pd10"
-	help
-	  The pin to use for the SD/MMC write-protect signal for a memory
-	  card.  If defined as " " (space), the card is considered writable.
-
 endif
diff --git a/arch/cris/arch-v32/mach-a3/Kconfig b/arch/cris/arch-v32/mach-a3/Kconfig
index 7796aaf..8754727 100644
--- a/arch/cris/arch-v32/mach-a3/Kconfig
+++ b/arch/cris/arch-v32/mach-a3/Kconfig
@@ -15,10 +15,6 @@
        int
        default 5
 
-config ETRAX_DDR
-       bool
-       default y
-
 config ETRAX_DDR2_MRS
 	hex "DDR2 MRS"
 	default "0"
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index c0a29b9..15b815d 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -47,7 +47,6 @@
  */
 
 #define task_pt_regs(task) user_regs(task_thread_info(task))
-#define current_regs() task_pt_regs(current)
 
 unsigned long get_wchan(struct task_struct *p);
 
diff --git a/arch/cris/include/uapi/asm/kvm_para.h b/arch/cris/include/uapi/asm/kvm_para.h
new file mode 100644
index 0000000..14fab8f
--- /dev/null
+++ b/arch/cris/include/uapi/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 76069c1..68232db 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -114,6 +114,11 @@
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 3c52fa6..0424315 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -110,6 +110,11 @@
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 28813f1..1239195 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -407,10 +407,9 @@
 #endif
 
 #ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
-	pr_err("%s(%lx, %lx)\n",
+	pr_err("%s(%llx, %llx)\n",
 	       __func__, start, end);
 }
 #endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 0a2c68f..0c4453f 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -46,11 +46,6 @@
 	memblock_add(base, size);
 }
 
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
-	return __va(memblock_alloc(size, align));
-}
-
 #ifdef CONFIG_EARLY_PRINTK
 static char *stdout;
 
@@ -136,8 +131,7 @@
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-		unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (unsigned long)__va(start);
 	initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 7e95404..0fa0b69 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -58,8 +58,7 @@
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (unsigned long)__va(start);
 	initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index a7fee0d..01fda44 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -85,6 +85,11 @@
 	return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		pmd_t *pmd, int write)
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 222152a..177d61d 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -171,10 +171,10 @@
 	mov	(REG_EPSW,fp),d0	# need to deliver signals before
 					# returning to userspace
 	and	EPSW_nSL,d0
-	beq	resume_kernel		# returning to supervisor mode
+	bne	resume_userspace	# returning to userspace
 
 #ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
+resume_kernel:
 	LOCAL_IRQ_DISABLE
 	mov	(TI_preempt_count,a2),d0	# non-zero preempt_count ?
 	cmp	0,d0
@@ -189,6 +189,8 @@
 	bne	restore_all
 	call	preempt_schedule_irq[],0
 	jmp	need_resched
+#else
+	jmp	resume_kernel
 #endif
 
 
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 5869e3f..a63e768 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -55,11 +55,6 @@
 	memblock_add(base, size);
 }
 
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
-	return __va(memblock_alloc(size, align));
-}
-
 void __init early_init_devtree(void *params)
 {
 	void *alloc;
@@ -96,8 +91,7 @@
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-		unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (unsigned long)__va(start);
 	initrd_end = (unsigned long)__va(end);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6bfcab97..b7634ce 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -546,14 +546,8 @@
 	memblock_add(base, size);
 }
 
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
-	return __va(memblock_alloc(size, align));
-}
-
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-		unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (unsigned long)__va(start);
 	initrd_end = (unsigned long)__va(end);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7b6391b..12e656f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1297,7 +1297,8 @@
 		prom_opal_align = 0x10000;
 }
 
-static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
+static int __init prom_rtas_call(int token, int nargs, int nret,
+				 int *outputs, ...)
 {
 	struct rtas_args rtas_args;
 	va_list list;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 442d8e2..8e59abc 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -611,6 +611,7 @@
 	of_node_put(np);
 	return of_get_ibm_chip_id(np);
 }
+EXPORT_SYMBOL(cpu_to_chip_id);
 
 /* Helper routines for cpu to core mapping */
 int cpu_core_index_of_thread(int cpu)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 76d8e7c..2dd69bf 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -206,7 +206,7 @@
 	int trap = TRAP(regs);
  	int is_exec = trap == 0x400;
 	int fault;
-	int rc = 0;
+	int rc = 0, store_update_sp = 0;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	/*
@@ -280,6 +280,14 @@
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+	/*
+	 * We want to do this outside mmap_sem, because reading code around nip
+	 * can result in fault, which will cause a deadlock when called with
+	 * mmap_sem held
+	 */
+	if (user_mode(regs))
+		store_update_sp = store_updates_sp(regs);
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -345,8 +353,7 @@
 		 * between the last mapped region and the stack will
 		 * expand the stack rather than segfaulting.
 		 */
-		if (address + 2048 < uregs->gpr[1]
-		    && (!user_mode(regs) || !store_updates_sp(regs)))
+		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
 			goto bad_area;
 	}
 	if (expand_stack(vma, address))
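
The hunk above follows a general locking pattern: a user-memory access that
can itself fault (store_updates_sp() reads the instruction at regs->nip)
must not run under mmap_sem, so its result is computed first and only the
cached value is consumed under the lock. A condensed sketch of the shape:

	if (user_mode(regs))
		store_update_sp = store_updates_sp(regs); /* may fault; lock not held */

	down_read(&mm->mmap_sem);
	...
	if (address + 2048 < uregs->gpr[1] && !store_update_sp)
		goto bad_area;	/* uses only the cached result */
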
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 834ca8e..d67db4b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -86,6 +86,11 @@
 	 */
 	return ((pgd_val(pgd) & 0x3) != 0x0);
 }
+
+int pmd_huge_support(void)
+{
+	return 1;
+}
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -101,6 +106,11 @@
 {
 	return 0;
 }
+
+int pmd_huge_support(void)
+{
+	return 0;
+}
 #endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index d64feb3..1f97e2b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@
 }
 early_initcall(alloc_dispatch_log_kmem_cache);
 
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
 {
 	/* This would call on the cpuidle framework, and the back-end pseries
 	 * driver to go to idle states
@@ -362,10 +362,22 @@
 	if (cpuidle_idle_call()) {
 		/* On error, execute default handler
 		 * to go into low thread priority and possibly
-		 * low power mode.
+		 * low power mode by ceding the processor to the hypervisor
 		 */
-		HMT_low();
-		HMT_very_low();
+
+		/* Indicate to hypervisor that we are idle. */
+		get_lppaca()->idle = 1;
+
+		/*
+		 * Yield the processor to the hypervisor.  We return if
+		 * an external interrupt occurs (which are driven prior
+		 * to returning here) or if a prod occurs from another
+		 * processor. When returning here, external interrupts
+		 * are enabled.
+		 */
+		cede_processor();
+
+		get_lppaca()->idle = 0;
 	}
 }
 
@@ -456,15 +468,14 @@
 
 	pSeries_nvram_init();
 
-	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		vpa_init(boot_cpuid);
-		ppc_md.power_save = pSeries_idle;
-	}
-
-	if (firmware_has_feature(FW_FEATURE_LPAR))
+		ppc_md.power_save = pseries_lpar_idle;
 		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
-	else
+	} else {
+		/* No special idle routine */
 		ppc_md.enable_pmcs = power4_enable_pmcs;
+	}
 
 	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c696ad7d..3ec2728 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,7 @@
 	def_bool y
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
@@ -91,7 +92,6 @@
 	select ARCH_INLINE_WRITE_UNLOCK_BH
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
@@ -135,15 +135,15 @@
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16 if 32BIT
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select VIRT_TO_BUS
 	select INIT_ALL_POSSIBLE
 	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
-	select OLD_SIGSUSPEND3
 	select OLD_SIGACTION
+	select OLD_SIGSUSPEND3
 	select SYSCTL_EXCEPTION_TRACE
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select VIRT_CPU_ACCOUNTING
+	select VIRT_TO_BUS
 
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
@@ -526,6 +526,7 @@
 	bool "kernel crash dumps"
 	depends on 64BIT && SMP
 	select KEXEC
+	select ZFCPDUMP
 	help
 	  Generate crash dump after being started by kexec.
 	  Crash dump kernels are loaded in the main kernel with kexec-tools
@@ -536,7 +537,7 @@
 config ZFCPDUMP
 	def_bool n
 	prompt "zfcpdump support"
-	select SMP
+	depends on SMP
 	help
 	  Select this option if you want to build a zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index b74400e..d204c65 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,14 +1,13 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -27,6 +26,7 @@
 CONFIG_RD_LZMA=y
 CONFIG_RD_XZ=y
 CONFIG_RD_LZO=y
+CONFIG_RD_LZ4=y
 CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -38,11 +38,13 @@
 CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -92,40 +94,49 @@
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=y
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
 CONFIG_RAW_DRIVER=m
 CONFIG_VIRTIO_BALLOON=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_TIMER_STATS=y
 CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_PROVE_RCU=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
-CONFIG_KPROBES_SANITY_TEST=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KPROBES_SANITY_TEST=y
 # CONFIG_STRICT_DEVMEM is not set
-CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
@@ -137,8 +148,10 @@
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
@@ -165,6 +178,8 @@
 CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 1eaa362..5f8bcc5 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -78,10 +78,14 @@
 
 int register_external_interrupt(u16 code, ext_int_handler_t handler);
 int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
-void service_subclass_irq_register(void);
-void service_subclass_irq_unregister(void);
-void measurement_alert_subclass_register(void);
-void measurement_alert_subclass_unregister(void);
+
+enum irq_subclass {
+	IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
+	IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+};
+
+void irq_subclass_register(enum irq_subclass subclass);
+void irq_subclass_unregister(enum irq_subclass subclass);
 
 #define irq_canonicalize(irq)  (irq)
 
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index dcf6948..4176dfe 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -31,6 +31,8 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
 struct pt_regs;
 struct kprobe;
 
@@ -57,7 +59,7 @@
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t insn[MAX_INSN_SIZE];
+	kprobe_opcode_t *insn;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 06a1361..7dc7f9c 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -56,5 +56,6 @@
 bool sclp_has_vt220(void);
 int sclp_pci_configure(u32 fid);
 int sclp_pci_deconfigure(u32 fid);
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 8b6e4f5..1f1b8c7 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -221,25 +221,26 @@
 
 asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
 {
+	const struct cred *cred = current_cred();
 	int i;
 
 	if (gidsetsize < 0)
 		return -EINVAL;
 
-	get_group_info(current->cred->group_info);
-	i = current->cred->group_info->ngroups;
+	get_group_info(cred->group_info);
+	i = cred->group_info->ngroups;
 	if (gidsetsize) {
 		if (i > gidsetsize) {
 			i = -EINVAL;
 			goto out;
 		}
-		if (groups16_to_user(grouplist, current->cred->group_info)) {
+		if (groups16_to_user(grouplist, cred->group_info)) {
 			i = -EFAULT;
 			goto out;
 		}
 	}
 out:
-	put_group_info(current->cred->group_info);
+	put_group_info(cred->group_info);
 	return i;
 }
 
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index c439ac9..1389b63 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -332,9 +332,9 @@
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
 			       (u16 __force __user *)(frame->retcode)))
 			goto give_sigsegv;
@@ -400,9 +400,9 @@
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
 		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
 				  (u16 __force __user *)(frame->retcode));
 	}
@@ -417,7 +417,7 @@
 	regs->psw.mask = PSW_MASK_BA |
 		(psw_user_bits & PSW_MASK_ASC) |
 		(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (__u64) ka->sa.sa_handler;
+	regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
 	regs->gprs[3] = (__force __u64) &frame->info;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d8f3556..c84f33d 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -16,6 +16,7 @@
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
+#include <asm/sclp.h>
 
 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -64,22 +65,46 @@
 }
 
 /*
- * Copy one page from "oldmem"
+ * Pointer to ELF header in new kernel
+ */
+static void *elfcorehdr_newmem;
+
+/*
+ * Copy one page from zfcpdump "oldmem"
+ *
+ * For pages below ZFCPDUMP_HSA_SIZE, memory is copied from the HSA.
+ * Otherwise a real memory copy is used.
+ */
+static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
+					 unsigned long src, int userbuf)
+{
+	int rc;
+
+	if (src < ZFCPDUMP_HSA_SIZE) {
+		rc = memcpy_hsa(buf, src, csize, userbuf);
+	} else {
+		if (userbuf)
+			rc = copy_to_user_real((void __force __user *) buf,
+					       (void *) src, csize);
+		else
+			rc = memcpy_real(buf, (void *) src, csize);
+	}
+	return rc ? rc : csize;
+}
+
+/*
+ * Copy one page from kdump "oldmem"
  *
  * For the kdump reserved memory this function performs a swap operation:
  *  - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
  *  - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-			 size_t csize, unsigned long offset, int userbuf)
+static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
+				      unsigned long src, int userbuf)
 {
-	unsigned long src;
 	int rc;
 
-	if (!csize)
-		return 0;
-
-	src = (pfn << PAGE_SHIFT) + offset;
 	if (src < OLDMEM_SIZE)
 		src += OLDMEM_BASE;
 	else if (src > OLDMEM_BASE &&
@@ -90,7 +115,88 @@
 				       (void *) src, csize);
 	else
 		rc = copy_page_real(buf, (void *) src, csize);
-	return (rc == 0) ? csize : rc;
+	return rc ? rc : csize;
+}
+
+/*
+ * Copy one page from "oldmem"
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+			 unsigned long offset, int userbuf)
+{
+	unsigned long src;
+
+	if (!csize)
+		return 0;
+	src = (pfn << PAGE_SHIFT) + offset;
+	if (OLDMEM_BASE)
+		return copy_oldmem_page_kdump(buf, csize, src, userbuf);
+	else
+		return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
+}
+
+/*
+ * Remap "oldmem" for kdump
+ *
+ * For the kdump reserved memory this function performs a swap operation:
+ * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
+ */
+static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
+					unsigned long from, unsigned long pfn,
+					unsigned long size, pgprot_t prot)
+{
+	unsigned long size_old;
+	int rc;
+
+	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
+		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+		rc = remap_pfn_range(vma, from,
+				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+				     size_old, prot);
+		if (rc || size == size_old)
+			return rc;
+		size -= size_old;
+		from += size_old;
+		pfn += size_old >> PAGE_SHIFT;
+	}
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for zfcpdump
+ *
+ * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below
+ * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function.
+ */
+static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
+					   unsigned long from,
+					   unsigned long pfn,
+					   unsigned long size, pgprot_t prot)
+{
+	unsigned long size_hsa;
+
+	if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) {
+		size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT));
+		if (size == size_hsa)
+			return 0;
+		size -= size_hsa;
+		from += size_hsa;
+		pfn += size_hsa >> PAGE_SHIFT;
+	}
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for kdump or zfcpdump
+ */
+int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
+			   unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	if (OLDMEM_BASE)
+		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
+	else
+		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
+						       prot);
 }
 
 /*
@@ -101,11 +207,21 @@
 	unsigned long copied = 0;
 	int rc;
 
-	if ((unsigned long) src < OLDMEM_SIZE) {
-		copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-		rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
-		if (rc)
-			return rc;
+	if (OLDMEM_BASE) {
+		if ((unsigned long) src < OLDMEM_SIZE) {
+			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
+			rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+			if (rc)
+				return rc;
+		}
+	} else {
+		if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) {
+			copied = min(count,
+				     ZFCPDUMP_HSA_SIZE - (unsigned long) src);
+			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
+			if (rc)
+				return rc;
+		}
 	}
 	return memcpy_real(dest + copied, src + copied, count - copied);
 }
@@ -368,14 +484,6 @@
 }
 
 /*
- * Relocate pointer in order to allow vmcore code access the data
- */
-static inline unsigned long relocate(unsigned long addr)
-{
-	return OLDMEM_BASE + addr;
-}
-
-/*
  * Initialize ELF loads (new kernel)
  */
 static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
@@ -426,7 +534,7 @@
 	ptr = nt_vmcoreinfo(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
-	phdr->p_offset = relocate(notes_offset);
+	phdr->p_offset = notes_offset;
 	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
 	phdr->p_memsz = phdr->p_filesz;
 	return ptr;
@@ -435,7 +543,7 @@
 /*
  * Create ELF core header (new kernel)
  */
-static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
+int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
 	Elf64_Phdr *phdr_notes, *phdr_loads;
 	int mem_chunk_cnt;
@@ -443,6 +551,12 @@
 	u32 alloc_size;
 	u64 hdr_off;
 
+	/* If we are not in kdump or zfcpdump mode, return */
+	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	/* If elfcorehdr= has been passed via cmdline, we use that one */
+	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
+		return 0;
 	mem_chunk_cnt = get_mem_chunk_cnt();
 
 	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
@@ -460,27 +574,52 @@
 	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
 	/* Init loads */
 	hdr_off = PTR_DIFF(ptr, hdr);
-	loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
-	*elfcorebuf_sz = hdr_off;
-	*elfcorebuf = (void *) relocate((unsigned long) hdr);
-	BUG_ON(*elfcorebuf_sz > alloc_size);
-}
-
-/*
- * Create kdump ELF core header in new kernel, if it has not been passed via
- * the "elfcorehdr" kernel parameter
- */
-static int setup_kdump_elfcorehdr(void)
-{
-	size_t elfcorebuf_sz;
-	char *elfcorebuf;
-
-	if (!OLDMEM_BASE || is_kdump_kernel())
-		return -EINVAL;
-	s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
-	elfcorehdr_addr = (unsigned long long) elfcorebuf;
-	elfcorehdr_size = elfcorebuf_sz;
+	loads_init(phdr_loads, hdr_off);
+	*addr = (unsigned long long) hdr;
+	elfcorehdr_newmem = hdr;
+	*size = (unsigned long long) hdr_off;
+	BUG_ON(*size > alloc_size);
 	return 0;
 }
 
-subsys_initcall(setup_kdump_elfcorehdr);
+/*
+ * Free ELF core header (new kernel)
+ */
+void elfcorehdr_free(unsigned long long addr)
+{
+	if (!elfcorehdr_newmem)
+		return;
+	kfree((void *)(unsigned long)addr);
+}
+
+/*
+ * Read from ELF header
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	void *src = (void *)(unsigned long)*ppos;
+
+	src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
+	memcpy(buf, src, count);
+	*ppos += count;
+	return count;
+}
+
+/*
+ * Read from ELF notes data
+ */
+ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+{
+	void *src = (void *)(unsigned long)*ppos;
+	int rc;
+
+	if (elfcorehdr_newmem) {
+		memcpy(buf, src, count);
+	} else {
+		rc = copy_from_oldmem(buf, src, count);
+		if (rc)
+			return rc;
+	}
+	*ppos += count;
+	return count;
+}
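
A worked example of the kdump address swap, with illustrative values only
(OLDMEM_BASE = 0x10000000, OLDMEM_SIZE = 0x10000000):

	/*
	 * src = 0x00002000 -> reads 0x10002000 (old kernel's low memory,
	 *                     preserved above OLDMEM_BASE)
	 * src = 0x10003000 -> reads 0x00003000 (the mirror direction)
	 * src outside both windows is read in place
	 */
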
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 87acc38..99e7f60 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -40,14 +40,15 @@
 {
 	struct stack_frame *sf;
 	struct pt_regs *regs;
+	unsigned long addr;
 
 	while (1) {
 		sp = sp & PSW_ADDR_INSN;
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
-		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+		addr = sf->gprs[8] & PSW_ADDR_INSN;
+		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
 		/* Follow the backchain. */
 		while (1) {
 			low = sp;
@@ -57,16 +58,16 @@
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
-			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
-			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+			addr = sf->gprs[8] & PSW_ADDR_INSN;
+			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		}
 		/* Zero backchain detected, check for interrupt frame. */
 		sp = (unsigned long) (sf + 1);
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *) sp;
-		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
-		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+		addr = regs->psw.addr & PSW_ADDR_INSN;
+		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		low = sp;
 		sp = regs->gprs[15];
 	}
@@ -128,8 +129,7 @@
 {
 #ifdef CONFIG_64BIT
 	printk("Last Breaking-Event-Address:\n");
-	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
-	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
 #endif
 }
 
@@ -143,10 +143,10 @@
 	char *mode;
 
 	mode = user_mode(regs) ? "User" : "Krnl";
-	printk("%s PSW : %p %p",
+	printk("%s PSW : %p %p (%pSR)\n",
 	       mode, (void *) regs->psw.mask,
+	       (void *) regs->psw.addr,
 	       (void *) regs->psw.addr);
-	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
 	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
 	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 3ddbc26..e9b04c3 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -53,27 +53,21 @@
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
 void do_notify_resume(struct pt_regs *regs);
 
-struct ext_code;
-void do_extint(struct pt_regs *regs);
+void __init init_IRQ(void);
+void do_IRQ(struct pt_regs *regs, int irq);
 void do_restart(void);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
-
+int setup_profiling_timer(unsigned int multiplier);
 void __init time_init(void);
+int pfn_is_nosave(unsigned long);
+void s390_early_resume(void);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 
 struct s390_mmap_arg_struct;
 struct fadvise64_64_args;
 struct old_sigaction;
 
-long sys_mmap2(struct s390_mmap_arg_struct __user  *arg);
-long sys_s390_ipc(uint call, int first, unsigned long second,
-	     unsigned long third, void __user *ptr);
-long sys_s390_personality(unsigned int personality);
-long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
-		    size_t len, int advice);
-long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
-long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
-			u32 len_low);
 long sys_sigreturn(void);
 long sys_rt_sigreturn(void);
 long sys32_sigreturn(void);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index e3043ae..1014ad5 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -15,6 +15,7 @@
 #include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
+#include "entry.h"
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -177,7 +178,7 @@
 
 	offset = ((void *) prepare_ftrace_return -
 		  (void *) ftrace_graph_caller) / 2;
-	return probe_kernel_write(ftrace_graph_caller + 2,
+	return probe_kernel_write((void *) ftrace_graph_caller + 2,
 				  &offset, sizeof(offset));
 }
 
@@ -185,7 +186,7 @@
 {
 	static unsigned short offset = 0x0002;
 
-	return probe_kernel_write(ftrace_graph_caller + 2,
+	return probe_kernel_write((void *) ftrace_graph_caller + 2,
 				  &offset, sizeof(offset));
 }
 
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b34ba0e..8ac2097 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -196,21 +196,23 @@
  * ext_int_hash[index] is the list head for all external interrupts that hash
  * to this index.
  */
-static struct list_head ext_int_hash[256];
+static struct hlist_head ext_int_hash[32] ____cacheline_aligned;
 
 struct ext_int_info {
 	ext_int_handler_t handler;
-	u16 code;
-	struct list_head entry;
+	struct hlist_node entry;
 	struct rcu_head rcu;
+	u16 code;
 };
 
 /* ext_int_hash_lock protects the handler lists for external interrupts */
-DEFINE_SPINLOCK(ext_int_hash_lock);
+static DEFINE_SPINLOCK(ext_int_hash_lock);
 
 static inline int ext_hash(u16 code)
 {
-	return (code + (code >> 9)) & 0xff;
+	BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash)));
+
+	return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
 }
 
 int register_external_interrupt(u16 code, ext_int_handler_t handler)
@@ -227,7 +229,7 @@
 	index = ext_hash(code);
 
 	spin_lock_irqsave(&ext_int_hash_lock, flags);
-	list_add_rcu(&p->entry, &ext_int_hash[index]);
+	hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
 	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
 	return 0;
 }
@@ -240,9 +242,9 @@
 	int index = ext_hash(code);
 
 	spin_lock_irqsave(&ext_int_hash_lock, flags);
-	list_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
+	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
 		if (p->code == code && p->handler == handler) {
-			list_del_rcu(&p->entry);
+			hlist_del_rcu(&p->entry);
 			kfree_rcu(p, rcu);
 		}
 	}
@@ -264,12 +266,12 @@
 
 	index = ext_hash(ext_code.code);
 	rcu_read_lock();
-	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
-		if (likely(p->code == ext_code.code))
-			p->handler(ext_code, regs->int_parm,
-				   regs->int_parm_long);
+	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
+		if (unlikely(p->code != ext_code.code))
+			continue;
+		p->handler(ext_code, regs->int_parm, regs->int_parm_long);
+	}
 	rcu_read_unlock();
-
 	return IRQ_HANDLED;
 }
 
@@ -283,55 +285,32 @@
 	int idx;
 
 	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
-		INIT_LIST_HEAD(&ext_int_hash[idx]);
+		INIT_HLIST_HEAD(&ext_int_hash[idx]);
 
 	irq_set_chip_and_handler(EXT_INTERRUPT,
 				 &dummy_irq_chip, handle_percpu_irq);
 	setup_irq(EXT_INTERRUPT, &external_interrupt);
 }
 
-static DEFINE_SPINLOCK(sc_irq_lock);
-static int sc_irq_refcount;
+static DEFINE_SPINLOCK(irq_subclass_lock);
+static unsigned char irq_subclass_refcount[64];
 
-void service_subclass_irq_register(void)
+void irq_subclass_register(enum irq_subclass subclass)
 {
-	spin_lock(&sc_irq_lock);
-	if (!sc_irq_refcount)
-		ctl_set_bit(0, 9);
-	sc_irq_refcount++;
-	spin_unlock(&sc_irq_lock);
+	spin_lock(&irq_subclass_lock);
+	if (!irq_subclass_refcount[subclass])
+		ctl_set_bit(0, subclass);
+	irq_subclass_refcount[subclass]++;
+	spin_unlock(&irq_subclass_lock);
 }
-EXPORT_SYMBOL(service_subclass_irq_register);
+EXPORT_SYMBOL(irq_subclass_register);
 
-void service_subclass_irq_unregister(void)
+void irq_subclass_unregister(enum irq_subclass subclass)
 {
-	spin_lock(&sc_irq_lock);
-	sc_irq_refcount--;
-	if (!sc_irq_refcount)
-		ctl_clear_bit(0, 9);
-	spin_unlock(&sc_irq_lock);
+	spin_lock(&irq_subclass_lock);
+	irq_subclass_refcount[subclass]--;
+	if (!irq_subclass_refcount[subclass])
+		ctl_clear_bit(0, subclass);
+	spin_unlock(&irq_subclass_lock);
 }
-EXPORT_SYMBOL(service_subclass_irq_unregister);
-
-static DEFINE_SPINLOCK(ma_subclass_lock);
-static int ma_subclass_refcount;
-
-void measurement_alert_subclass_register(void)
-{
-	spin_lock(&ma_subclass_lock);
-	if (!ma_subclass_refcount)
-		ctl_set_bit(0, 5);
-	ma_subclass_refcount++;
-	spin_unlock(&ma_subclass_lock);
-}
-EXPORT_SYMBOL(measurement_alert_subclass_register);
-
-void measurement_alert_subclass_unregister(void)
-{
-	spin_lock(&ma_subclass_lock);
-	ma_subclass_refcount--;
-	if (!ma_subclass_refcount)
-		ctl_clear_bit(0, 5);
-	spin_unlock(&ma_subclass_lock);
-}
-EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+EXPORT_SYMBOL(irq_subclass_unregister);
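
Callers elsewhere in this series convert mechanically; the two retired
interface pairs map onto the new enum like this:

	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* was service_subclass_irq_register() */
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
		/* was measurement_alert_subclass_unregister() */
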
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index adbbe7f..0ce9fb2 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -37,6 +37,26 @@
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = { };
 
+DEFINE_INSN_CACHE_OPS(dmainsn);
+
+static void *alloc_dmainsn_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+}
+
+static void free_dmainsn_page(void *page)
+{
+	free_page((unsigned long)page);
+}
+
+struct kprobe_insn_cache kprobe_dmainsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
+	.alloc = alloc_dmainsn_page,
+	.free = free_dmainsn_page,
+	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+};
+
 static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
 	switch (insn[0] >> 8) {
@@ -100,9 +120,8 @@
 			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xc0:
-		if ((insn[0] & 0x0f) == 0x00 ||	/* larl  */
-		    (insn[0] & 0x0f) == 0x05)	/* brasl */
-		fixup |= FIXUP_RETURN_REGISTER;
+		if ((insn[0] & 0x0f) == 0x05)	/* brasl */
+			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xeb:
 		switch (insn[2] & 0xff) {
@@ -134,18 +153,128 @@
 	return fixup;
 }
 
+static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
+{
+	/* Check if we have a RIL-b or RIL-c format instruction which
+	 * we need to modify in order to avoid instruction emulation. */
+	switch (insn[0] >> 8) {
+	case 0xc0:
+		if ((insn[0] & 0x0f) == 0x00) /* larl */
+			return true;
+		break;
+	case 0xc4:
+		switch (insn[0] & 0x0f) {
+		case 0x02: /* llhrl  */
+		case 0x04: /* lghrl  */
+		case 0x05: /* lhrl   */
+		case 0x06: /* llghrl */
+		case 0x07: /* sthrl  */
+		case 0x08: /* lgrl   */
+		case 0x0b: /* stgrl  */
+		case 0x0c: /* lgfrl  */
+		case 0x0d: /* lrl    */
+		case 0x0e: /* llgfrl */
+		case 0x0f: /* strl   */
+			return true;
+		}
+		break;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl   */
+		case 0x02: /* pfdrl  */
+		case 0x04: /* cghrl  */
+		case 0x05: /* chrl   */
+		case 0x06: /* clghrl */
+		case 0x07: /* clhrl  */
+		case 0x08: /* cgrl   */
+		case 0x0a: /* clgrl  */
+		case 0x0c: /* cgfrl  */
+		case 0x0d: /* crl    */
+		case 0x0e: /* clgfrl */
+		case 0x0f: /* clrl   */
+			return true;
+		}
+		break;
+	}
+	return false;
+}
+
+static void __kprobes copy_instruction(struct kprobe *p)
+{
+	s64 disp, new_disp;
+	u64 addr, new_addr;
+
+	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+	if (!is_insn_relative_long(p->ainsn.insn))
+		return;
+	/*
+	 * For pc-relative instructions in RIL-b or RIL-c format, patch the
+	 * RI2 displacement field. We have already made sure that the insn
+	 * slot for the patched instruction is within the same 2GB area
+	 * as the original instruction (either kernel image or module area).
+	 * Therefore the new displacement will always fit.
+	 */
+	disp = *(s32 *)&p->ainsn.insn[1];
+	addr = (u64)(unsigned long)p->addr;
+	new_addr = (u64)(unsigned long)p->ainsn.insn;
+	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
+	*(s32 *)&p->ainsn.insn[1] = new_disp;
+}
+
+static inline int is_kernel_addr(void *addr)
+{
+	return addr < (void *)_end;
+}
+
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
+static int __kprobes s390_get_insn_slot(struct kprobe *p)
+{
+	/*
+	 * Get an insn slot that is within the same 2GB area as the original
+	 * instruction. That way instructions with a 32bit signed displacement
+	 * field can be patched and executed within the insn slot.
+	 */
+	p->ainsn.insn = NULL;
+	if (is_kernel_addr(p->addr))
+		p->ainsn.insn = get_dmainsn_slot();
+	if (is_module_addr(p->addr))
+		p->ainsn.insn = get_insn_slot();
+	return p->ainsn.insn ? 0 : -ENOMEM;
+}
+
+static void __kprobes s390_free_insn_slot(struct kprobe *p)
+{
+	if (!p->ainsn.insn)
+		return;
+	if (is_kernel_addr(p->addr))
+		free_dmainsn_slot(p->ainsn.insn, 0);
+	else
+		free_insn_slot(p->ainsn.insn, 0);
+	p->ainsn.insn = NULL;
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	if ((unsigned long) p->addr & 0x01)
 		return -EINVAL;
-
 	/* Make sure the probe isn't going on a difficult instruction */
 	if (is_prohibited_opcode(p->addr))
 		return -EINVAL;
-
+	if (s390_get_insn_slot(p))
+		return -ENOMEM;
 	p->opcode = *p->addr;
-	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
-
+	copy_instruction(p);
 	return 0;
 }
 
@@ -186,6 +315,7 @@
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
+	s390_free_insn_slot(p);
 }
 
 static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
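
The displacement rewrite in copy_instruction() is easiest to follow with
numbers (addresses are illustrative only; RIL displacements count halfwords):

	/*
	 * larl at addr = 0x100000, disp = 0x800
	 *   absolute target       = addr + 2*disp = 0x101000
	 * insn slot at new_addr   = 0x180000
	 *   new_disp = (0x101000 - 0x180000) / 2  = -0x3f800
	 * check: new_addr + 2*new_disp            = 0x101000
	 * The 2GB slot guarantee from s390_get_insn_slot() keeps new_disp
	 * inside the signed 32-bit RI2 field.
	 */
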
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index ac21781..719e27b 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -50,7 +50,7 @@
 /*
  * Initialize CPU ELF notes
  */
-void setup_regs(void)
+static void setup_regs(void)
 {
 	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
 	int cpu, this_cpu;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index fb99c20..1105502 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -274,7 +274,7 @@
 	int flags = PMC_INIT;
 
 	on_each_cpu(setup_pmc_cpu, &flags, 1);
-	measurement_alert_subclass_register();
+	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 
 	return 0;
 }
@@ -285,7 +285,7 @@
 	int flags = PMC_RELEASE;
 
 	on_each_cpu(setup_pmc_cpu, &flags, 1);
-	measurement_alert_subclass_unregister();
+	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 }
 
 /* Release the PMU if event is the last perf event */
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 500aa10..2343c21 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -105,13 +105,10 @@
 
 	cpu = smp_processor_id();
 	memset(&cf_info, 0, sizeof(cf_info));
-	if (!qctri(&cf_info)) {
+	if (!qctri(&cf_info))
 		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
 			cpu, cf_info.cfvn, cf_info.csvn,
 			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
-		print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
-				     &cf_info, sizeof(cf_info));
-	}
 
 	local_irq_restore(flags);
 }
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 077a993..e1c9d1c 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -139,10 +139,10 @@
 	if (!runtime_instr_avail())
 		return 0;
 
-	measurement_alert_subclass_register();
+	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 	rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
 	if (rc)
-		measurement_alert_subclass_unregister();
+		irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 	else
 		pr_info("Runtime instrumentation facility initialized\n");
 	return rc;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d386c4e..1a4313a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -362,7 +362,7 @@
  * Send cpus emergency shutdown signal. This gives the cpus the
  * opportunity to complete outstanding interrupts.
  */
-void smp_emergency_stop(cpumask_t *cpumask)
+static void smp_emergency_stop(cpumask_t *cpumask)
 {
 	u64 end;
 	int cpu;
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 737bff3..a7a7537 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -13,6 +13,7 @@
 #include <asm/ipl.h>
 #include <asm/cio.h>
 #include <asm/pci.h>
+#include "entry.h"
 
 /*
  * References to section boundaries
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index f00aefb..7de4469 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -673,7 +673,7 @@
 	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
 	if (rc)
 		goto out_pfault;
-	service_subclass_irq_register();
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	hotcpu_notifier(pfault_cpu_notify, 0);
 	return 0;
 
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 248445f..d261c62 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -223,6 +223,11 @@
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmdp, int write)
 {
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 921fa54..d1e0e0c 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -14,6 +14,7 @@
 #include <linux/gfp.h>
 #include <linux/cpu.h>
 #include <asm/ctl_reg.h>
+#include <asm/io.h>
 
 /*
  * This function writes to kernel memory bypassing DAT and possible
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index bf7c0dc..de8cbc3 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -245,7 +245,9 @@
  * gmap_alloc_table is assumed to be called with mmap_sem held
  */
 static int gmap_alloc_table(struct gmap *gmap,
-			       unsigned long *table, unsigned long init)
+			    unsigned long *table, unsigned long init)
+	__releases(&gmap->mm->page_table_lock)
+	__acquires(&gmap->mm->page_table_lock)
 {
 	struct page *page;
 	unsigned long *new;
@@ -966,7 +968,7 @@
 	tlb_remove_table(tlb, table);
 }
 
-void __tlb_remove_table(void *_table)
+static void __tlb_remove_table(void *_table)
 {
 	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
 	void *table = (void *)((unsigned long) _table & ~mask);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d5f10a4..7092392 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -805,7 +805,7 @@
 		return NULL;
 	memset(header, 0, sz);
 	header->pages = sz / PAGE_SIZE;
-	hole = sz - bpfsize + sizeof(*header);
+	hole = sz - (bpfsize + sizeof(*header));
 	/* Insert random number of illegal instructions before BPF code
 	 * and make sure the first instruction starts at an even address.
 	 */
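
The fix is pure operator precedence; a quick numeric check with invented
sizes (sz = 8192, bpfsize = 100, sizeof(*header) = 16):

	/*
	 * old: hole = 8192 - 100 + 16   = 8108
	 * new: hole = 8192 - (100 + 16) = 8076
	 * The old value overstated the hole by 2*sizeof(*header), letting
	 * the randomized start push the image past its allocation.
	 */
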
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index b5b2916..231ceca 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1001,7 +1001,7 @@
 	if (hws_state != HWS_STOPPED)
 		goto deallocate_exit;
 
-	measurement_alert_subclass_unregister();
+	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 	deallocate_sdbt();
 
 	hws_state = HWS_DEALLOCATED;
@@ -1115,7 +1115,7 @@
 		mutex_lock(&hws_sem);
 
 		if (hws_state == HWS_STOPPED) {
-			measurement_alert_subclass_unregister();
+			irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 			deallocate_sdbt();
 		}
 		if (hws_wq) {
@@ -1190,7 +1190,7 @@
 	hws_oom = 1;
 	hws_flush_all = 0;
 	/* now let them in, 1407 CPUMF external interrupts */
-	measurement_alert_subclass_register();
+	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 
 	return 0;
 }
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 65dd81b..1fa8be4 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -600,37 +600,13 @@
 	},
 };
 
-static void sdhi0_set_pwr(struct platform_device *pdev, int state)
-{
-	static int power_gpio = -EINVAL;
-
-	if (power_gpio < 0) {
-		int ret = gpio_request(GPIO_PTB6, NULL);
-		if (!ret) {
-			power_gpio = GPIO_PTB6;
-			gpio_direction_output(power_gpio, 0);
-		}
-	}
-
-	/*
-	 * Toggle the GPIO regardless, whether we managed to grab it above or
-	 * the fixed regulator driver did.
-	 */
-	gpio_set_value(GPIO_PTB6, state);
-}
-
-static int sdhi0_get_cd(struct platform_device *pdev)
-{
-	return !gpio_get_value(GPIO_PTY7);
-}
-
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.set_pwr	= sdhi0_set_pwr,
 	.tmio_caps      = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
 			  MMC_CAP_NEEDS_POLL,
-	.get_cd		= sdhi0_get_cd,
+	.tmio_flags	= TMIO_MMC_USE_GPIO_CD,
+	.cd_gpio	= GPIO_PTY7,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -656,39 +632,15 @@
 	},
 };
 
-static void cn12_set_pwr(struct platform_device *pdev, int state)
-{
-	static int power_gpio = -EINVAL;
-
-	if (power_gpio < 0) {
-		int ret = gpio_request(GPIO_PTB7, NULL);
-		if (!ret) {
-			power_gpio = GPIO_PTB7;
-			gpio_direction_output(power_gpio, 0);
-		}
-	}
-
-	/*
-	 * Toggle the GPIO regardless, whether we managed to grab it above or
-	 * the fixed regulator driver did.
-	 */
-	gpio_set_value(GPIO_PTB7, state);
-}
-
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 /* SDHI1 */
-static int sdhi1_get_cd(struct platform_device *pdev)
-{
-	return !gpio_get_value(GPIO_PTW7);
-}
-
 static struct sh_mobile_sdhi_info sdhi1_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
 	.tmio_caps      = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
 			  MMC_CAP_NEEDS_POLL,
-	.set_pwr	= cn12_set_pwr,
-	.get_cd		= sdhi1_get_cd,
+	.tmio_flags	= TMIO_MMC_USE_GPIO_CD,
+	.cd_gpio	= GPIO_PTW7,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -718,27 +670,19 @@
 #else
 
 /* MMC SPI */
-static int mmc_spi_get_ro(struct device *dev)
-{
-	return gpio_get_value(GPIO_PTY6);
-}
-
-static int mmc_spi_get_cd(struct device *dev)
-{
-	return !gpio_get_value(GPIO_PTY7);
-}
-
 static void mmc_spi_setpower(struct device *dev, unsigned int maskval)
 {
 	gpio_set_value(GPIO_PTB6, maskval ? 1 : 0);
 }
 
 static struct mmc_spi_platform_data mmc_spi_info = {
-	.get_ro = mmc_spi_get_ro,
-	.get_cd = mmc_spi_get_cd,
 	.caps = MMC_CAP_NEEDS_POLL,
+	.caps2 = MMC_CAP2_RO_ACTIVE_HIGH,
 	.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3.3V only */
 	.setpower = mmc_spi_setpower,
+	.flags = MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
+	.cd_gpio = GPIO_PTY7,
+	.ro_gpio = GPIO_PTY6,
 };
 
 static struct spi_board_info spi_bus[] = {
@@ -998,11 +942,6 @@
 
 #if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
 /* SH_MMCIF */
-static void mmcif_down_pwr(struct platform_device *pdev)
-{
-	cn12_set_pwr(pdev, 0);
-}
-
 static struct resource sh_mmcif_resources[] = {
 	[0] = {
 		.name	= "SH_MMCIF",
@@ -1023,8 +962,6 @@
 };
 
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
-	.set_pwr	= cn12_set_pwr,
-	.down_pwr	= mmcif_down_pwr,
 	.sup_pclk	= 0, /* SH7724: Max Pclk/2 */
 	.caps		= MMC_CAP_4_BIT_DATA |
 			  MMC_CAP_8_BIT_DATA |
@@ -1341,10 +1278,6 @@
 	gpio_direction_input(GPIO_PTR6);
 
 	/* SD-card slot CN11 */
-	/* Card-detect, used on CN11, either with SDHI0 or with SPI */
-	gpio_request(GPIO_PTY7, NULL);
-	gpio_direction_input(GPIO_PTY7);
-
 #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
 	/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
 	gpio_request(GPIO_FN_SDHI0WP,  NULL);
@@ -1363,8 +1296,6 @@
 	gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
 	gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
 	gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
-	gpio_request(GPIO_PTY6, NULL); /* write protect */
-	gpio_direction_input(GPIO_PTY6);
 
 	spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
 #endif
@@ -1394,10 +1325,6 @@
 	gpio_request(GPIO_FN_SDHI1D1,  NULL);
 	gpio_request(GPIO_FN_SDHI1D0,  NULL);
 
-	/* Card-detect, used on CN12 with SDHI1 */
-	gpio_request(GPIO_PTW7, NULL);
-	gpio_direction_input(GPIO_PTW7);
-
 	cn12_enabled = true;
 #endif
 
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index d776234..0d676a4 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -83,6 +83,11 @@
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 3d0ddbc..7136885 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -169,10 +169,10 @@
 		new_ka.ka_restorer = restorer;
 		ret = get_user(u_handler, &act->sa_handler);
 		new_ka.sa.sa_handler =  compat_ptr(u_handler);
-		ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
+		ret |= copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
 		sigset_from_compat(&new_ka.sa.sa_mask, &set32);
-		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		ret |= __get_user(u_restorer, &act->sa_restorer);
+		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= get_user(u_restorer, &act->sa_restorer);
 		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
                 if (ret)
                 	return -EFAULT;
@@ -183,9 +183,9 @@
 	if (!ret && oact) {
 		sigset_to_compat(&set32, &old_ka.sa.sa_mask);
 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
-		ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
-		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
+		ret |= copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
+		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
 		if (ret)
 			ret = -EFAULT;
         }
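
The switch away from the double-underscore primitives matters because those
skip the address-range check. Roughly (a sketch of the relationship, not the
actual sparc implementation; the helper name is invented):

	unsigned long copy_from_user_sketch(void *to, const void __user *from,
					    unsigned long n)
	{
		if (!access_ok(VERIFY_READ, from, n))
			return n;	/* the step __copy_from_user() omits */
		return __copy_from_user(to, from, n);
	}
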
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d2b5944..9639964 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -234,6 +234,11 @@
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index e514899..0cb3bba 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -166,6 +166,11 @@
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 3a16c14..64507f3 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -3,18 +3,23 @@
 
 #ifdef __KERNEL__
 
+#include <linux/stringify.h>
 #include <linux/types.h>
 #include <asm/nops.h>
 #include <asm/asm.h>
 
 #define JUMP_LABEL_NOP_SIZE 5
 
-#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#ifdef CONFIG_X86_64
+# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
+#else
+# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
+#endif
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:"
-		STATIC_KEY_INITIAL_NOP
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
 		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
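
With this change every jump-label site is always one of exactly two known
5-byte patterns, which the verification added to jump_label.c further down
relies on. Schematically (NOP bytes shown for x86-64; treat them as
illustrative):

	/*
	 * disabled: 0f 1f 44 00 00   nopl 0x0(%rax,%rax,1)  (P6_NOP5_ATOMIC)
	 * enabled:  e9 xx xx xx xx   jmp rel32 to l_yes
	 * The old ".byte 0xe9; .long 0" placeholder was a jmp to the next
	 * instruction, which cannot be sanity-checked against a single
	 * expected byte pattern.
	 */
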
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 8d16bef..3d19994 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -315,21 +315,6 @@
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
-{
-	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
-}
-
-static inline int pte_swp_soft_dirty(pte_t pte)
-{
-	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
-}
-
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
-{
-	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
-}
-
 static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
@@ -446,6 +431,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/log2.h>
 
 static inline int pte_none(pte_t pte)
@@ -864,6 +850,24 @@
 {
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present(pte));
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present(pte));
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present(pte));
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
 #include <asm-generic/pgtable.h>
 #endif	/* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f4843e0..0ecac25 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -75,6 +75,9 @@
  * with swap entry format. On x86 bits 6 and 7 are *not* involved
  * into swap entry computation, but bit 6 is used for nonlinear
  * file mapping, so we borrow bit 7 for soft dirty tracking.
+ *
+ * Please note that this bit must be treated as a swap dirty page
+ * mark if and only if the PTE has the present bit clear!
  */
 #ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
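
The warning boils down to one bit doing double duty, keyed off the present
bit, which is what the VM_BUG_ON(pte_present(pte)) checks added above
enforce:

	/*
	 * pte bit 7 (_PAGE_PSE):
	 *   present bit set   -> PSE / huge page size bit
	 *   present bit clear -> _PAGE_SWP_SOFT_DIRTY of the swap entry
	 */
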
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index cf51200..e6d90ba 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,6 +62,7 @@
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 }
 
@@ -84,14 +85,38 @@
 
 #ifndef CONFIG_SMP
 
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
+/* "_up" is for UniProcessor.
+ *
+ * This is a helper for other header functions.  *Not* intended to be called
+ * directly.  All global TLB flushes need to either call this or bump the
+ * vm statistics themselves.
+ */
+static inline void __flush_tlb_up(void)
+{
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	__flush_tlb();
+}
+
+static inline void flush_tlb_all(void)
+{
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	__flush_tlb_all();
+}
+
+static inline void flush_tlb(void)
+{
+	__flush_tlb_up();
+}
+
+static inline void local_flush_tlb(void)
+{
+	__flush_tlb_up();
+}
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -105,14 +130,14 @@
 				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void flush_tlb_mm_range(struct mm_struct *mm,
 	   unsigned long start, unsigned long end, unsigned long vmflag)
 {
 	if (mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
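
After this rework every UP flush entry point bumps a vm-event counter
exactly once before touching the hardware, e.g.:

	local_flush_tlb();	/* __flush_tlb_up(): count NR_TLB_LOCAL_FLUSH_ALL,
				   then __flush_tlb() */
	flush_tlb_all();	/* counts, then __flush_tlb_all() */
	__flush_tlb_one(addr);	/* counts NR_TLB_LOCAL_FLUSH_ONE,
				   then __flush_tlb_single(addr) */
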
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index d4cdfa6..ce2d0a2 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,6 +683,7 @@
 	}
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Save MTRR state */
@@ -696,6 +697,7 @@
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 69eb2fa..376dc78 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -52,8 +52,7 @@
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (unsigned long)__va(start);
 	initrd_end = (unsigned long)__va(end);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 63bdb29..b3cd3eb 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/pci_ids.h>
+#include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
 #include <asm/io_apic.h>
@@ -216,6 +217,157 @@
 
 }
 
+/*
+ * Systems with Intel graphics controllers set aside memory exclusively
+ * for gfx driver use.  This memory is not marked in the E820 as reserved
+ * or as RAM, and so is subject to overlap from E820 manipulation later
+ * in the boot process.  On some systems, MMIO space is allocated on top,
+ * despite the efforts of the "RAM buffer" approach, which simply rounds
+ * memory boundaries up to 64M to try to catch space that may decode
+ * as RAM and so is not suitable for MMIO.
+ *
+ * And yes, so far on current devices the base addr is always under 4G.
+ */
+static u32 __init intel_stolen_base(int num, int slot, int func)
+{
+	u32 base;
+
+	/*
+	 * For the PCI IDs in this quirk, the stolen base is always
+	 * in 0x5c, aka the BDSM register (yes that's really what
+	 * it's called).
+	 */
+	base = read_pci_config(num, slot, func, 0x5c);
+	base &= ~((1<<20) - 1);
+
+	return base;
+}
+
+#define KB(x)	((x) * 1024)
+#define MB(x)	(KB (KB (x)))
+#define GB(x)	(MB (KB (x)))
+
+static size_t __init gen3_stolen_size(int num, int slot, int func)
+{
+	size_t stolen_size;
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+	case I855_GMCH_GMS_STOLEN_1M:
+		stolen_size = MB(1);
+		break;
+	case I855_GMCH_GMS_STOLEN_4M:
+		stolen_size = MB(4);
+		break;
+	case I855_GMCH_GMS_STOLEN_8M:
+		stolen_size = MB(8);
+		break;
+	case I855_GMCH_GMS_STOLEN_16M:
+		stolen_size = MB(16);
+		break;
+	case I855_GMCH_GMS_STOLEN_32M:
+		stolen_size = MB(32);
+		break;
+	case I915_GMCH_GMS_STOLEN_48M:
+		stolen_size = MB(48);
+		break;
+	case I915_GMCH_GMS_STOLEN_64M:
+		stolen_size = MB(64);
+		break;
+	case G33_GMCH_GMS_STOLEN_128M:
+		stolen_size = MB(128);
+		break;
+	case G33_GMCH_GMS_STOLEN_256M:
+		stolen_size = MB(256);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_96M:
+		stolen_size = MB(96);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_160M:
+		stolen_size = MB(160);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_224M:
+		stolen_size = MB(224);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_352M:
+		stolen_size = MB(352);
+		break;
+	default:
+		stolen_size = 0;
+		break;
+	}
+
+	return stolen_size;
+}
+
+static size_t __init gen6_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+static struct pci_device_id intel_stolen_ids[] __initdata = {
+	INTEL_I915G_IDS(gen3_stolen_size),
+	INTEL_I915GM_IDS(gen3_stolen_size),
+	INTEL_I945G_IDS(gen3_stolen_size),
+	INTEL_I945GM_IDS(gen3_stolen_size),
+	INTEL_VLV_M_IDS(gen3_stolen_size),
+	INTEL_VLV_D_IDS(gen3_stolen_size),
+	INTEL_PINEVIEW_IDS(gen3_stolen_size),
+	INTEL_I965G_IDS(gen3_stolen_size),
+	INTEL_G33_IDS(gen3_stolen_size),
+	INTEL_I965GM_IDS(gen3_stolen_size),
+	INTEL_GM45_IDS(gen3_stolen_size),
+	INTEL_G45_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
+	INTEL_SNB_D_IDS(gen6_stolen_size),
+	INTEL_SNB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_D_IDS(gen6_stolen_size),
+	INTEL_HSW_D_IDS(gen6_stolen_size),
+	INTEL_HSW_M_IDS(gen6_stolen_size),
+};
+
+static void __init intel_graphics_stolen(int num, int slot, int func)
+{
+	size_t size;
+	int i;
+	u32 start;
+	u16 device, subvendor, subdevice;
+
+	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
+	subvendor = read_pci_config_16(num, slot, func,
+				       PCI_SUBSYSTEM_VENDOR_ID);
+	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
+
+	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
+		if (intel_stolen_ids[i].device == device) {
+			stolen_size_fn stolen_size =
+				(stolen_size_fn)intel_stolen_ids[i].driver_data;
+			size = stolen_size(num, slot, func);
+			start = intel_stolen_base(num, slot, func);
+			if (size && start) {
+				/* Mark this space as reserved */
+				e820_add_region(start, size, E820_RESERVED);
+				sanitize_e820_map(e820.map,
+						  ARRAY_SIZE(e820.map),
+						  &e820.nr_map);
+			}
+			return;
+		}
+	}
+}
+
 #define QFLAG_APPLY_ONCE 	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -251,6 +403,8 @@
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
+	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
 	{}
 };
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2cfbc3a..f0dcb0c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1176,6 +1176,9 @@
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $__PAGE_OFFSET, %esp
+	jb ftrace_stub		/* Paging not enabled yet? */
+
 	cmpl $0, function_trace_stop
 	jne  ftrace_stub
 
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 460f5d9..ee11b7d 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -24,18 +24,57 @@
 	} __attribute__((packed));
 };
 
+static void bug_at(unsigned char *ip, int line)
+{
+	/*
+	 * The location is not an op that we were expecting.
+	 * Something went wrong. Crash the box, as something could be
+	 * corrupting the kernel.
+	 */
+	pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
+	       ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
+	BUG();
+}
+
 static void __jump_label_transform(struct jump_entry *entry,
 				   enum jump_label_type type,
-				   void *(*poker)(void *, const void *, size_t))
+				   void *(*poker)(void *, const void *, size_t),
+				   int init)
 {
 	union jump_code_union code;
+	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
 	if (type == JUMP_LABEL_ENABLE) {
+		/*
+		 * We are enabling this jump label. If it is not a nop
+		 * then something must have gone wrong.
+		 */
+		if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
+			bug_at((void *)entry->code, __LINE__);
+
 		code.jump = 0xe9;
 		code.offset = entry->target -
 				(entry->code + JUMP_LABEL_NOP_SIZE);
-	} else
+	} else {
+		/*
+		 * We are disabling this jump label. If it is not what
+		 * we think it is, then something must have gone wrong.
+		 * If this is the first initialization call, then we
+		 * are converting the default nop to the ideal nop.
+		 */
+		if (init) {
+			const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
+			if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
+				bug_at((void *)entry->code, __LINE__);
+		} else {
+			code.jump = 0xe9;
+			code.offset = entry->target -
+				(entry->code + JUMP_LABEL_NOP_SIZE);
+			if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
+				bug_at((void *)entry->code, __LINE__);
+		}
 		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+	}
 
 	/*
 	 * Make text_poke_bp() a default fallback poker.
@@ -57,15 +96,38 @@
 {
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	__jump_label_transform(entry, type, NULL);
+	__jump_label_transform(entry, type, NULL, 0);
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
 }
 
+static enum {
+	JL_STATE_START,
+	JL_STATE_NO_UPDATE,
+	JL_STATE_UPDATE,
+} jlstate __initdata_or_module = JL_STATE_START;
+
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
 				      enum jump_label_type type)
 {
-	__jump_label_transform(entry, type, text_poke_early);
+	/*
+	 * This function is called at boot up and when modules are
+	 * first loaded. Check if the default nop, the one that is
+	 * inserted at compile time, is the ideal nop. If it is, then
+	 * we do not need to update the nop, and we can leave it as is.
+	 * If it is not, then we need to update the nop to the ideal nop.
+	 */
+	if (jlstate == JL_STATE_START) {
+		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
+		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+
+		if (memcmp(ideal_nop, default_nop, 5) != 0)
+			jlstate = JL_STATE_UPDATE;
+		else
+			jlstate = JL_STATE_NO_UPDATE;
+	}
+	if (jlstate == JL_STATE_UPDATE)
+		__jump_label_transform(entry, type, text_poke_early, 1);
 }
 
 #endif
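The five bytes every check above validates are either the ideal/default nop or a relative jump: opcode 0xe9 followed by a 32-bit displacement measured from the end of the instruction. The disable-path check, restated as a condensed sketch:

	union jump_code_union expect;

	expect.jump   = 0xe9;
	expect.offset = entry->target - (entry->code + JUMP_LABEL_NOP_SIZE);

	/* __jump_label_transform() calls bug_at() (and thus BUG()) when the
	 * live bytes match neither this jump nor the expected nop. */
	if (memcmp((void *)entry->code, &expect, JUMP_LABEL_NOP_SIZE) != 0)
		bug_at((void *)entry->code, __LINE__);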
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 7e73e8c..9d980d8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -59,6 +59,10 @@
 	return NULL;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
 #else
 
 struct page *
@@ -77,6 +81,10 @@
 	return !!(pud_val(pud) & _PAGE_PSE);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
 #endif
 
 /* x86_64 also uses this file */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 282375f..ae699b3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,6 +103,7 @@
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -130,6 +131,7 @@
 	info.flush_start = start;
 	info.flush_end = end;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -149,6 +151,7 @@
 
 	preempt_disable();
 
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -211,16 +214,19 @@
 	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
 
 	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
+		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
-	else {
+	} else {
 		if (has_large_page(mm, start, end)) {
 			local_flush_tlb();
 			goto flush_all;
 		}
 		/* flush range by one by one 'invlpg' */
-		for (addr = start; addr < end;	addr += PAGE_SIZE)
+		for (addr = start; addr < end;	addr += PAGE_SIZE) {
+			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
+		}
 
 		if (cpumask_any_but(mm_cpumask(mm),
 				smp_processor_id()) < nr_cpu_ids)
@@ -256,6 +262,7 @@
 
 static void do_flush_tlb_all(void *info)
 {
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -263,6 +270,7 @@
 
 void flush_tlb_all(void)
 {
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fc216d..fa6ade7 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1692,7 +1692,6 @@
 	case CPU_UP_PREPARE:
 		xen_vcpu_setup(cpu);
 		if (xen_have_vector_callback) {
-			xen_init_lock_cpu(cpu);
 			if (xen_feature(XENFEAT_hvm_safe_pvclock))
 				xen_setup_timer(cpu);
 		}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 0d4ec35..8b901e8 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -990,10 +990,13 @@
 				printk(KERN_WARNING "m2p_remove_override: "
 						"pfn %lx mfn %lx, failed to modify kernel mappings",
 						pfn, mfn);
+				put_balloon_scratch_page();
 				return -1;
 			}
 
-			mcs = xen_mc_entry(
+			xen_mc_batch();
+
+			mcs = __xen_mc_entry(
 					sizeof(struct gnttab_unmap_and_replace));
 			unmap_op = mcs.args;
 			unmap_op->host_addr = kmap_op->host_addr;
@@ -1003,12 +1006,11 @@
 			MULTI_grant_table_op(mcs.mc,
 					GNTTABOP_unmap_and_replace, unmap_op, 1);
 
-			xen_mc_issue(PARAVIRT_LAZY_MMU);
-
 			mcs = __xen_mc_entry(0);
 			MULTI_update_va_mapping(mcs.mc, scratch_page_address,
-					pfn_pte(page_to_pfn(get_balloon_scratch_page()),
+					pfn_pte(page_to_pfn(scratch_page),
 					PAGE_KERNEL_RO), 0);
+
 			xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 			kmap_op->host_addr = 0;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 9235842..d1e4777 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -273,12 +273,20 @@
 	BUG_ON(smp_processor_id() != 0);
 	native_smp_prepare_boot_cpu();
 
-	/* We've switched to the "real" per-cpu gdt, so make sure the
-	   old memory can be recycled */
-	make_lowmem_page_readwrite(xen_initial_gdt);
+	if (xen_pv_domain()) {
+		/* We've switched to the "real" per-cpu gdt, so make sure the
+		   old memory can be recycled */
+		make_lowmem_page_readwrite(xen_initial_gdt);
 
-	xen_filter_cpu_maps();
-	xen_setup_vcpu_info_placement();
+		xen_filter_cpu_maps();
+		xen_setup_vcpu_info_placement();
+	}
+	/*
+	 * The alternative logic (which patches the unlock/lock) runs before
+	 * the smp bootup code is activated. Hence we need to set this up
+	 * before the core kernel is patched. Otherwise we will have only
+	 * modules patched but not core code.
+	 */
 	xen_init_spinlocks();
 }
 
@@ -709,6 +717,15 @@
 	WARN_ON(rc);
 	if (!rc)
 		rc =  native_cpu_up(cpu, tidle);
+
+	/*
+	 * We must initialize the slowpath CPU kicker _after_ the native
+	 * path has executed. If we initialized it before none of the
+	 * unlocker IPI kicks would reach the booting CPU as the booting
+	 * CPU had not set itself 'online' in cpu_online_mask. That mask
+	 * is checked when IPIs are sent (on HVM at least).
+	 */
+	xen_init_lock_cpu(cpu);
 	return rc;
 }
 
@@ -728,4 +745,5 @@
 	smp_ops.cpu_die = xen_hvm_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0438b93..253f63f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -81,7 +81,6 @@
 	spinlock_stats.time_blocked += delta;
 }
 #else  /* !CONFIG_XEN_DEBUG_FS */
-#define TIMEOUT			(1 << 10)
 static inline void add_stats(enum xen_contention_stat var, u32 val)
 {
 }
@@ -96,23 +95,6 @@
 }
 #endif  /* CONFIG_XEN_DEBUG_FS */
 
-/*
- * Size struct xen_spinlock so it's the same as arch_spinlock_t.
- */
-#if NR_CPUS < 256
-typedef u8 xen_spinners_t;
-# define inc_spinners(xl) \
-	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
-# define dec_spinners(xl) \
-	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
-#else
-typedef u16 xen_spinners_t;
-# define inc_spinners(xl) \
-	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
-# define dec_spinners(xl) \
-	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
-#endif
-
 struct xen_lock_waiting {
 	struct arch_spinlock *lock;
 	__ticket_t want;
@@ -123,6 +105,7 @@
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
+static bool xen_pvspin = true;
 static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
@@ -241,16 +224,12 @@
 	int irq;
 	char *name;
 
+	if (!xen_pvspin)
+		return;
+
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
 
-	/*
-	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
-	 * (xen: disable PV spinlocks on HVM)
-	 */
-	if (xen_hvm_domain())
-		return;
-
 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
 				     cpu,
@@ -270,11 +249,7 @@
 
 void xen_uninit_lock_cpu(int cpu)
 {
-	/*
-	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
-	 * (xen: disable PV spinlocks on HVM)
-	 */
-	if (xen_hvm_domain())
+	if (!xen_pvspin)
 		return;
 
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
@@ -283,16 +258,9 @@
 	per_cpu(irq_name, cpu) = NULL;
 }
 
-static bool xen_pvspin __initdata = true;
 
 void __init xen_init_spinlocks(void)
 {
-	/*
-	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
-	 * (xen: disable PV spinlocks on HVM)
-	 */
-	if (xen_hvm_domain())
-		return;
 
 	if (!xen_pvspin) {
 		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
@@ -323,6 +291,9 @@
 	if (d_xen == NULL)
 		return -ENOMEM;
 
+	if (!xen_pvspin)
+		return 0;
+
 	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
 
 	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 42a8bba..101012b 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -170,8 +170,7 @@
 
 __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-		unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	initrd_start = (void *)__va(start);
 	initrd_end = (void *)__va(end);
diff --git a/block/Kconfig b/block/Kconfig
index a7e40a7..7f38e40 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,6 +99,12 @@
 
 	See Documentation/cgroups/blkio-controller.txt for more information.
 
+config CMDLINE_PARSER
+	bool "Block device command line partition parser"
+	default n
+	---help---
+	  Parse the kernel command line to get block device partition information.
+
 menu "Partition Types"
 
 source "block/partitions/Kconfig"
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..4fa4be5 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,3 +18,4 @@
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
+obj-$(CONFIG_CMDLINE_PARSER)	+= cmdline-parser.o
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 4464c82..46cd7bd 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -367,7 +367,7 @@
 	if (!icq)
 		return NULL;
 
-	if (radix_tree_preload(gfp_mask) < 0) {
+	if (radix_tree_maybe_preload(gfp_mask) < 0) {
 		kmem_cache_free(et->icq_cache, icq);
 		return NULL;
 	}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5efc5a6..3aa5b19 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -29,7 +29,7 @@
 	int err;
 	unsigned long v;
 
-	err = strict_strtoul(page, 10, &v);
+	err = kstrtoul(page, 10, &v);
 	if (err || v > UINT_MAX)
 		return -EINVAL;
 
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
new file mode 100644
index 0000000..cc2637f
--- /dev/null
+++ b/block/cmdline-parser.c
@@ -0,0 +1,250 @@
+/*
+ * Parse command line, get partition information
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/cmdline-parser.h>
+
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+	int ret = 0;
+	struct cmdline_subpart *new_subpart;
+
+	*subpart = NULL;
+
+	new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+	if (!new_subpart)
+		return -ENOMEM;
+
+	if (*partdef == '-') {
+		new_subpart->size = (sector_t)(~0ULL);
+		partdef++;
+	} else {
+		new_subpart->size = (sector_t)memparse(partdef, &partdef);
+		if (new_subpart->size < (sector_t)PAGE_SIZE) {
+			pr_warn("cmdline partition size is invalid.");
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+
+	if (*partdef == '@') {
+		partdef++;
+		new_subpart->from = (sector_t)memparse(partdef, &partdef);
+	} else {
+		new_subpart->from = (sector_t)(~0ULL);
+	}
+
+	if (*partdef == '(') {
+		int length;
+		char *next = strchr(++partdef, ')');
+
+		if (!next) {
+			pr_warn("cmdline partition format is invalid.");
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		length = min_t(int, next - partdef,
+			       sizeof(new_subpart->name) - 1);
+		strncpy(new_subpart->name, partdef, length);
+		new_subpart->name[length] = '\0';
+
+		partdef = ++next;
+	} else
+		new_subpart->name[0] = '\0';
+
+	new_subpart->flags = 0;
+
+	if (!strncmp(partdef, "ro", 2)) {
+		new_subpart->flags |= PF_RDONLY;
+		partdef += 2;
+	}
+
+	if (!strncmp(partdef, "lk", 2)) {
+		new_subpart->flags |= PF_POWERUP_LOCK;
+		partdef += 2;
+	}
+
+	*subpart = new_subpart;
+	return 0;
+fail:
+	kfree(new_subpart);
+	return ret;
+}
+
+static void free_subpart(struct cmdline_parts *parts)
+{
+	struct cmdline_subpart *subpart;
+
+	while (parts->subpart) {
+		subpart = parts->subpart;
+		parts->subpart = subpart->next_subpart;
+		kfree(subpart);
+	}
+}
+
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+	int ret = -EINVAL;
+	char *next;
+	int length;
+	struct cmdline_subpart **next_subpart;
+	struct cmdline_parts *newparts;
+	char buf[BDEVNAME_SIZE + 32 + 4];
+
+	*parts = NULL;
+
+	newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+	if (!newparts)
+		return -ENOMEM;
+
+	next = strchr(bdevdef, ':');
+	if (!next) {
+		pr_warn("cmdline partition has no block device.");
+		goto fail;
+	}
+
+	length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+	strncpy(newparts->name, bdevdef, length);
+	newparts->name[length] = '\0';
+	newparts->nr_subparts = 0;
+
+	next_subpart = &newparts->subpart;
+
+	while (next && *(++next)) {
+		bdevdef = next;
+		next = strchr(bdevdef, ',');
+
+		length = (!next) ? (sizeof(buf) - 1) :
+			min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+		strncpy(buf, bdevdef, length);
+		buf[length] = '\0';
+
+		ret = parse_subpart(next_subpart, buf);
+		if (ret)
+			goto fail;
+
+		newparts->nr_subparts++;
+		next_subpart = &(*next_subpart)->next_subpart;
+	}
+
+	if (!newparts->subpart) {
+		pr_warn("cmdline partition has no valid partition.");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	*parts = newparts;
+
+	return 0;
+fail:
+	free_subpart(newparts);
+	kfree(newparts);
+	return ret;
+}
+
+void cmdline_parts_free(struct cmdline_parts **parts)
+{
+	struct cmdline_parts *next_parts;
+
+	while (*parts) {
+		next_parts = (*parts)->next_parts;
+		free_subpart(*parts);
+		kfree(*parts);
+		*parts = next_parts;
+	}
+}
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
+{
+	int ret;
+	char *buf;
+	char *pbuf;
+	char *next;
+	struct cmdline_parts **next_parts;
+
+	*parts = NULL;
+
+	next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	next_parts = parts;
+
+	while (next && *pbuf) {
+		next = strchr(pbuf, ';');
+		if (next)
+			*next = '\0';
+
+		ret = parse_parts(next_parts, pbuf);
+		if (ret)
+			goto fail;
+
+		if (next)
+			pbuf = ++next;
+
+		next_parts = &(*next_parts)->next_parts;
+	}
+
+	if (!*parts) {
+		pr_warn("cmdline partition has no valid partition.");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = 0;
+done:
+	kfree(buf);
+	return ret;
+
+fail:
+	cmdline_parts_free(parts);
+	goto done;
+}
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+					 const char *bdev)
+{
+	while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+		parts = parts->next_parts;
+	return parts;
+}
+
+/*
+ *  add_part() return values:
+ *    0 on success.
+ *    1 if no more partitions can be added.
+ */
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+		       int slot,
+		       int (*add_part)(int, struct cmdline_subpart *, void *),
+		       void *param)
+
+{
+	sector_t from = 0;
+	struct cmdline_subpart *subpart;
+
+	for (subpart = parts->subpart; subpart;
+	     subpart = subpart->next_subpart, slot++) {
+		if (subpart->from == (sector_t)(~0ULL))
+			subpart->from = from;
+		else
+			from = subpart->from;
+
+		if (from >= disk_size)
+			break;
+
+		if (subpart->size > (disk_size - from))
+			subpart->size = disk_size - from;
+
+		from += subpart->size;
+
+		if (add_part(slot, subpart, param))
+			break;
+	}
+}
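How a caller is expected to drive this API, as a hedged sketch (the device name, command line string, and callback below are hypothetical):

	static int show_part(int slot, struct cmdline_subpart *subpart,
			     void *param)
	{
		pr_info("slot %d: %s @%llu, %llu bytes\n", slot, subpart->name,
			(unsigned long long)subpart->from,
			(unsigned long long)subpart->size);
		return 0;	/* 0 lets cmdline_parts_set() keep iterating */
	}

	static void cmdline_parts_demo(sector_t disk_size)
	{
		struct cmdline_parts *all, *parts;

		if (cmdline_parts_parse(&all, "mmcblk1:256m(boot),-(rootfs)"))
			return;
		parts = cmdline_parts_find(all, "mmcblk1");
		if (parts)
			cmdline_parts_set(parts, disk_size, 1, show_part, NULL);
		cmdline_parts_free(&all);
	}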
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7e5d474..fbd5a67 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -70,7 +70,7 @@
 		return ret;
 
 	ret = copy_to_user(ugeo, &geo, 4);
-	ret |= __put_user(geo.start, &ugeo->start);
+	ret |= put_user(geo.start, &ugeo->start);
 	if (ret)
 		ret = -EFAULT;
 
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 4cebb2f..87a3208 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -260,3 +260,10 @@
 	  partition table format used by Motorola Delta machines (using
 	  sysv68).
 	  Otherwise, say N.
+
+config CMDLINE_PARTITION
+	bool "Command line partition support" if PARTITION_ADVANCED
+	select CMDLINE_PARSER
+	help
+	  Say Y here if you want to read the partition table from the kernel
+	  command line (bootargs). The command line format is the same as
+	  mtdparts.
diff --git a/block/partitions/Makefile b/block/partitions/Makefile
index 2be4d7b..37a9527 100644
--- a/block/partitions/Makefile
+++ b/block/partitions/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
 obj-$(CONFIG_ATARI_PARTITION) += atari.o
 obj-$(CONFIG_AIX_PARTITION) += aix.o
+obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
 obj-$(CONFIG_MAC_PARTITION) += mac.o
 obj-$(CONFIG_LDM_PARTITION) += ldm.o
 obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
diff --git a/block/partitions/check.c b/block/partitions/check.c
index 19ba207..9ac1df7 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -34,6 +34,7 @@
 #include "efi.h"
 #include "karma.h"
 #include "sysv68.h"
+#include "cmdline.h"
 
 int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
 
@@ -65,6 +66,9 @@
 	adfspart_check_ADFS,
 #endif
 
+#ifdef CONFIG_CMDLINE_PARTITION
+	cmdline_partition,
+#endif
 #ifdef CONFIG_EFI_PARTITION
 	efi_partition,		/* this must come before msdos */
 #endif
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
new file mode 100644
index 0000000..56cf4ff
--- /dev/null
+++ b/block/partitions/cmdline.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 HUAWEI
+ * Author: Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ * Read the block device partition table from the kernel command line.
+ * This is typically used on embedded devices with fixed block devices
+ * (eMMC). There is no MBR, which saves storage space, and the
+ * bootloader can access data on the block device by absolute address.
+ * Users can easily change the partition layout.
+ *
+ * The command line format is the same as mtdparts.
+ *
+ * For a detailed description of the format, see
+ * "Documentation/block/cmdline-partition.txt".
+ *
+ */
+
+#include <linux/cmdline-parser.h>
+
+#include "check.h"
+#include "cmdline.h"
+
+static char *cmdline;
+static struct cmdline_parts *bdev_parts;
+
+static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+{
+	int label_min;
+	struct partition_meta_info *info;
+	char tmp[sizeof(info->volname) + 4];
+	struct parsed_partitions *state = (struct parsed_partitions *)param;
+
+	if (slot >= state->limit)
+		return 1;
+
+	put_partition(state, slot, subpart->from >> 9,
+		      subpart->size >> 9);
+
+	info = &state->parts[slot].info;
+
+	label_min = min_t(int, sizeof(info->volname) - 1,
+			  sizeof(subpart->name));
+	strncpy(info->volname, subpart->name, label_min);
+	info->volname[label_min] = '\0';
+
+	snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+	strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+	state->parts[slot].has_info = true;
+
+	return 0;
+}
+
+static int __init cmdline_parts_setup(char *s)
+{
+	cmdline = s;
+	return 1;
+}
+__setup("blkdevparts=", cmdline_parts_setup);
+
+/*
+ * Purpose: allocate cmdline partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ *  0 if this isn't our partition table
+ *  1 if successful
+ */
+int cmdline_partition(struct parsed_partitions *state)
+{
+	sector_t disk_size;
+	char bdev[BDEVNAME_SIZE];
+	struct cmdline_parts *parts;
+
+	if (cmdline) {
+		if (bdev_parts)
+			cmdline_parts_free(&bdev_parts);
+
+		if (cmdline_parts_parse(&bdev_parts, cmdline)) {
+			cmdline = NULL;
+			return -1;
+		}
+		cmdline = NULL;
+	}
+
+	if (!bdev_parts)
+		return 0;
+
+	bdevname(state->bdev, bdev);
+	parts = cmdline_parts_find(bdev_parts, bdev);
+	if (!parts)
+		return 0;
+
+	disk_size = get_capacity(state->bdev->bd_disk) << 9;
+
+	cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+
+	strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+	return 1;
+}
diff --git a/block/partitions/cmdline.h b/block/partitions/cmdline.h
new file mode 100644
index 0000000..26e0f8d
--- /dev/null
+++ b/block/partitions/cmdline.h
@@ -0,0 +1,2 @@
+
+int cmdline_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index c85fc89..1a5ec9a 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -25,6 +25,9 @@
  * TODO:
  *
  * Changelog:
+ * Mon August 5th, 2013 Davidlohr Bueso <davidlohr@hp.com>
+ * - detect hybrid MBRs, tighter pMBR checking & cleanups.
+ *
  * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
  * - test for valid PMBR and valid PGPT before ever reading
  *   AGPT, allow override with 'gpt' kernel command line option.
@@ -149,34 +152,80 @@
 		       bdev_logical_block_size(bdev)) - 1ULL;
 }
 
-static inline int
-pmbr_part_valid(struct partition *part)
+static inline int pmbr_part_valid(gpt_mbr_record *part)
 {
-        if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
-            le32_to_cpu(part->start_sect) == 1UL)
-                return 1;
-        return 0;
+	if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
+		goto invalid;
+
+	/* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
+	if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
+		goto invalid;
+
+	return GPT_MBR_PROTECTIVE;
+invalid:
+	return 0;
 }
 
 /**
  * is_pmbr_valid(): test Protective MBR for validity
  * @mbr: pointer to a legacy mbr structure
+ * @total_sectors: amount of sectors in the device
  *
- * Description: Returns 1 if PMBR is valid, 0 otherwise.
- * Validity depends on two things:
+ * Description: Checks for a valid protective or hybrid
+ * master boot record (MBR). The validity of a pMBR depends
+ * on all of the following properties:
  *  1) MSDOS signature is in the last two bytes of the MBR
  *  2) One partition of type 0xEE is found
+ *
+ * In addition, a hybrid MBR will have up to three additional
+ * primary partitions, which point to the same space that's
+ * marked out by up to three GPT partitions.
+ *
+ * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or
+ * GPT_MBR_HYBRID depending on the device layout.
  */
-static int
-is_pmbr_valid(legacy_mbr *mbr)
+static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
 {
-	int i;
+	int i, part = 0, ret = 0; /* invalid by default */
+
 	if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
-                return 0;
+		goto done;
+
+	for (i = 0; i < 4; i++) {
+		ret = pmbr_part_valid(&mbr->partition_record[i]);
+		if (ret == GPT_MBR_PROTECTIVE) {
+			part = i;
+			/*
+			 * Ok, we at least know that there's a protective MBR,
+			 * now check if there are other partition types for
+			 * hybrid MBR.
+			 */
+			goto check_hybrid;
+		}
+	}
+
+	if (ret != GPT_MBR_PROTECTIVE)
+		goto done;
+check_hybrid:
 	for (i = 0; i < 4; i++)
-		if (pmbr_part_valid(&mbr->partition_record[i]))
-                        return 1;
-	return 0;
+		if ((mbr->partition_record[i].os_type !=
+			EFI_PMBR_OSTYPE_EFI_GPT) &&
+		    (mbr->partition_record[i].os_type != 0x00))
+			ret = GPT_MBR_HYBRID;
+
+	/*
+	 * Protective MBRs take up the lesser of the whole disk
+	 * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+	 *
+	 * Hybrid MBRs do not necessarily comply with this.
+	 */
+	if (ret == GPT_MBR_PROTECTIVE) {
+		if (le32_to_cpu(mbr->partition_record[part].size_in_lba) !=
+		    min((uint32_t) total_sectors - 1, 0xFFFFFFFF))
+			ret = 0;
+	}
+done:
+	return ret;
 }
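The sizing rule enforced above, restated with explicit 64-bit math (a sketch, not part of the patch): a conforming protective MBR must cover the lesser of the whole disk minus one LBA and the 32-bit LBA maximum.

	static bool pmbr_size_ok(u32 size_in_lba, sector_t total_sectors)
	{
		u32 expect = min_t(u64, total_sectors - 1, 0xFFFFFFFFULL);

		return size_in_lba == expect;
	}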
 
 /**
@@ -243,8 +292,7 @@
 		return NULL;
 
 	if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
-                     (u8 *) pte,
-		     count) < count) {
+			(u8 *) pte, count) < count) {
 		kfree(pte);
                 pte=NULL;
 		return NULL;
@@ -364,7 +412,12 @@
 			 (unsigned long long)lastlba);
 		goto fail;
 	}
-
+	if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) {
+		pr_debug("GPT: last_usable_lba incorrect: %lld < %lld\n",
+			 (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+			 (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba));
+		goto fail;
+	}
 	/* Check that sizeof_partition_entry has the correct value */
 	if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
 		pr_debug("GUID Partitition Entry Size check failed.\n");
@@ -429,44 +482,42 @@
 	if (!pgpt || !agpt)
 		return;
 	if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
-		printk(KERN_WARNING
-		       "GPT:Primary header LBA != Alt. header alternate_lba\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->my_lba),
                        (unsigned long long)le64_to_cpu(agpt->alternate_lba));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
-		printk(KERN_WARNING
-		       "GPT:Primary header alternate_lba != Alt. header my_lba\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Primary header alternate_lba != Alt. header my_lba\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
                        (unsigned long long)le64_to_cpu(agpt->my_lba));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->first_usable_lba) !=
             le64_to_cpu(agpt->first_usable_lba)) {
-		printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:first_usable_lbas don't match.\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
                        (unsigned long long)le64_to_cpu(agpt->first_usable_lba));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->last_usable_lba) !=
             le64_to_cpu(agpt->last_usable_lba)) {
-		printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:last_usable_lbas don't match.\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
                        (unsigned long long)le64_to_cpu(agpt->last_usable_lba));
 		error_found++;
 	}
 	if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
-		printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+		pr_warn("GPT:disk_guids don't match.\n");
 		error_found++;
 	}
 	if (le32_to_cpu(pgpt->num_partition_entries) !=
             le32_to_cpu(agpt->num_partition_entries)) {
-		printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+		pr_warn("GPT:num_partition_entries don't match: "
 		       "0x%x != 0x%x\n",
 		       le32_to_cpu(pgpt->num_partition_entries),
 		       le32_to_cpu(agpt->num_partition_entries));
@@ -474,8 +525,7 @@
 	}
 	if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
             le32_to_cpu(agpt->sizeof_partition_entry)) {
-		printk(KERN_WARNING
-		       "GPT:sizeof_partition_entry values don't match: "
+		pr_warn("GPT:sizeof_partition_entry values don't match: "
 		       "0x%x != 0x%x\n",
                        le32_to_cpu(pgpt->sizeof_partition_entry),
 		       le32_to_cpu(agpt->sizeof_partition_entry));
@@ -483,34 +533,30 @@
 	}
 	if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
             le32_to_cpu(agpt->partition_entry_array_crc32)) {
-		printk(KERN_WARNING
-		       "GPT:partition_entry_array_crc32 values don't match: "
+		pr_warn("GPT:partition_entry_array_crc32 values don't match: "
 		       "0x%x != 0x%x\n",
                        le32_to_cpu(pgpt->partition_entry_array_crc32),
 		       le32_to_cpu(agpt->partition_entry_array_crc32));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
-		printk(KERN_WARNING
-		       "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+		pr_warn("GPT:%lld != %lld\n",
 			(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
 			(unsigned long long)lastlba);
 		error_found++;
 	}
 
 	if (le64_to_cpu(agpt->my_lba) != lastlba) {
-		printk(KERN_WARNING
-		       "GPT:Alternate GPT header not at the end of the disk.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Alternate GPT header not at the end of the disk.\n");
+		pr_warn("GPT:%lld != %lld\n",
 			(unsigned long long)le64_to_cpu(agpt->my_lba),
 			(unsigned long long)lastlba);
 		error_found++;
 	}
 
 	if (error_found)
-		printk(KERN_WARNING
-		       "GPT: Use GNU Parted to correct GPT errors.\n");
+		pr_warn("GPT: Use GNU Parted to correct GPT errors.\n");
 	return;
 }
 
@@ -536,6 +582,7 @@
 	gpt_header *pgpt = NULL, *agpt = NULL;
 	gpt_entry *pptes = NULL, *aptes = NULL;
 	legacy_mbr *legacymbr;
+	sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
 	u64 lastlba;
 
 	if (!ptes)
@@ -543,17 +590,22 @@
 
 	lastlba = last_lba(state->bdev);
         if (!force_gpt) {
-                /* This will be added to the EFI Spec. per Intel after v1.02. */
-                legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
-                if (legacymbr) {
-                        read_lba(state, 0, (u8 *) legacymbr,
-				 sizeof (*legacymbr));
-                        good_pmbr = is_pmbr_valid(legacymbr);
-                        kfree(legacymbr);
-                }
-                if (!good_pmbr)
-                        goto fail;
-        }
+		/* This will be added to the EFI Spec. per Intel after v1.02. */
+		legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
+		if (!legacymbr)
+			goto fail;
+
+		read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr));
+		good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
+		kfree(legacymbr);
+
+		if (!good_pmbr)
+			goto fail;
+
+		pr_debug("Device has a %s MBR\n",
+			 good_pmbr == GPT_MBR_PROTECTIVE ?
+						"protective" : "hybrid");
+	}
 
 	good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
 				 &pgpt, &pptes);
@@ -576,11 +628,8 @@
                 *ptes = pptes;
                 kfree(agpt);
                 kfree(aptes);
-                if (!good_agpt) {
-                        printk(KERN_WARNING 
-			       "Alternate GPT is invalid, "
-                               "using primary GPT.\n");
-                }
+		if (!good_agpt)
+                        pr_warn("Alternate GPT is invalid, using primary GPT.\n");
                 return 1;
         }
         else if (good_agpt) {
@@ -588,8 +637,7 @@
                 *ptes = aptes;
                 kfree(pgpt);
                 kfree(pptes);
-                printk(KERN_WARNING 
-                       "Primary GPT is invalid, using alternate GPT.\n");
+		pr_warn("Primary GPT is invalid, using alternate GPT.\n");
                 return 1;
         }
 
@@ -651,8 +699,7 @@
 		put_partition(state, i+1, start * ssz, size * ssz);
 
 		/* If this is a RAID volume, tell md */
-		if (!efi_guidcmp(ptes[i].partition_type_guid,
-				 PARTITION_LINUX_RAID_GUID))
+		if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID))
 			state->parts[i + 1].flags = ADDPART_FLAG_RAID;
 
 		info = &state->parts[i + 1].info;
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index b69ab72..4efcafb 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -37,6 +37,9 @@
 #define EFI_PMBR_OSTYPE_EFI 0xEF
 #define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
 
+#define GPT_MBR_PROTECTIVE  1
+#define GPT_MBR_HYBRID      2
+
 #define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
 #define GPT_HEADER_REVISION_V1 0x00010000
 #define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -101,11 +104,25 @@
 	efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
 } __attribute__ ((packed)) gpt_entry;
 
+typedef struct _gpt_mbr_record {
+	u8	boot_indicator; /* unused by EFI, set to 0x80 for bootable */
+	u8	start_head;     /* unused by EFI, pt start in CHS */
+	u8	start_sector;   /* unused by EFI, pt start in CHS */
+	u8	start_track;
+	u8	os_type;        /* EFI and legacy non-EFI OS types */
+	u8	end_head;       /* unused by EFI, pt end in CHS */
+	u8	end_sector;     /* unused by EFI, pt end in CHS */
+	u8	end_track;      /* unused by EFI, pt end in CHS */
+	__le32	starting_lba;   /* used by EFI - start addr of the on disk pt */
+	__le32	size_in_lba;    /* used by EFI - size of pt in LBA */
+} __packed gpt_mbr_record;
+
 typedef struct _legacy_mbr {
 	u8 boot_code[440];
 	__le32 unique_mbr_signature;
 	__le16 unknown;
-	struct partition partition_record[4];
+	gpt_mbr_record partition_record[4];
 	__le16 signature;
 } __attribute__ ((packed)) legacy_mbr;
 
@@ -113,22 +130,3 @@
 extern int efi_partition(struct parsed_partitions *state);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * --------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4 
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
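A layout sanity check that could accompany the new structure (editor's sketch; it would live in any init path): gpt_mbr_record packs to 16 bytes, so four records plus the 440-byte boot code, 4-byte disk signature, 2-byte unknown field, and 2-byte MBR signature add up to exactly 512.

	BUILD_BUG_ON(sizeof(gpt_mbr_record) != 16);
	BUILD_BUG_ON(sizeof(legacy_mbr) != 512);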
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 449f629..8557adc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2865,15 +2865,4 @@
 	.id_table =	he_pci_tbl,
 };
 
-static int __init he_init(void)
-{
-	return pci_register_driver(&he_driver);
-}
-
-static void __exit he_cleanup(void)
-{
-	pci_unregister_driver(&he_driver);
-}
-
-module_init(he_init);
-module_exit(he_cleanup);
+module_pci_driver(he_driver);
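module_pci_driver() generates the init/exit pair that this hunk deletes; roughly (simplified from the macro in <linux/pci.h>):

	static int __init he_driver_init(void)
	{
		return pci_register_driver(&he_driver);
	}
	module_init(he_driver_init);

	static void __exit he_driver_exit(void)
	{
		pci_unregister_driver(&he_driver);
	}
	module_exit(he_driver_exit);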
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 409502a..5aca5f4 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -778,7 +778,7 @@
 		return error;
 	}
 
-	if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) {
+	if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
 		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
 				   card->atmdev->esi, 6);
 		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1219ab7..1e16cbd 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -77,9 +77,36 @@
 	return dmabuf->ops->mmap(dmabuf, vma);
 }
 
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+	struct dma_buf *dmabuf;
+	loff_t base;
+
+	if (!is_dma_buf_file(file))
+		return -EBADF;
+
+	dmabuf = file->private_data;
+
+	/* only support discovering the end of the buffer,
+	   but also allow SEEK_SET to maintain the idiomatic
+	   SEEK_END(0), SEEK_CUR(0) pattern */
+	if (whence == SEEK_END)
+		base = dmabuf->size;
+	else if (whence == SEEK_SET)
+		base = 0;
+	else
+		return -EINVAL;
+
+	if (offset != 0)
+		return -EINVAL;
+
+	return base + offset;
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
+	.llseek		= dma_buf_llseek,
 };
 
 /*
@@ -133,7 +160,12 @@
 	dmabuf->exp_name = exp_name;
 
 	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+	if (IS_ERR(file)) {
+		kfree(dmabuf);
+		return ERR_CAST(file);
+	}
 
+	file->f_mode |= FMODE_LSEEK;
 	dmabuf->file = file;
 
 	mutex_init(&dmabuf->lock);
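From userspace the new llseek support enables the idiomatic size probe; a minimal sketch (fd is assumed to be a dma-buf file descriptor):

	#include <sys/types.h>
	#include <unistd.h>

	static off_t dmabuf_size(int fd)
	{
		off_t size = lseek(fd, 0, SEEK_END);	/* buffer size */

		if (size >= 0)
			lseek(fd, 0, SEEK_SET);		/* rewind */
		return size;	/* negative on kernels without dma-buf llseek */
	}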
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index cd6b20f..3776840 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -269,6 +269,8 @@
 	return NULL;
 }
 
+#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
+
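The local macro is needed because a u32 zero-extends, never sign-extends, when promoted to the unsigned long that the generic IS_ERR_VALUE() compares against, so on 64-bit builds error codes stored in a u32 were silently passed through. Worked example:

	u32 tmp = (u32)-EILSEQ;	/* 0xffffffac */

	IS_ERR_VALUE(tmp);	/* promotes to 0x00000000ffffffac on 64-bit,
				 * far below (unsigned long)-MAX_ERRNO: false */
	IS_ERR_VALUE_U32(tmp);	/* 0xffffffac >= 0xfffff001: true */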
 static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 			      struct bcma_device_id *match, int core_num,
 			      struct bcma_device *core)
@@ -351,11 +353,11 @@
 	 * the main register space for the core
 	 */
 	tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
-	if (tmp == 0 || IS_ERR_VALUE(tmp)) {
+	if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
 		/* Try again to see if it is a bridge */
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_BRIDGE, 0);
-		if (tmp == 0 || IS_ERR_VALUE(tmp)) {
+		if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
 			return -EILSEQ;
 		} else {
 			bcma_info(bus, "Bridge found\n");
@@ -369,7 +371,7 @@
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 				SCAN_ADDR_TYPE_SLAVE, i);
-			if (IS_ERR_VALUE(tmp)) {
+			if (IS_ERR_VALUE_U32(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: slave port %d "
 				 * "has %d descriptors\n", i, j); */
@@ -386,7 +388,7 @@
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 				SCAN_ADDR_TYPE_MWRAP, i);
-			if (IS_ERR_VALUE(tmp)) {
+			if (IS_ERR_VALUE_U32(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: master wrapper %d "
 				 * "has %d descriptors\n", i, j); */
@@ -404,7 +406,7 @@
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 				SCAN_ADDR_TYPE_SWRAP, i + hack);
-			if (IS_ERR_VALUE(tmp)) {
+			if (IS_ERR_VALUE_U32(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: master wrapper %d "
 				 * has %d descriptors\n", i, j); */
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 025c41d..14a9d19 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
 /* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
-#define VERSION "83"
+#define VERSION "85"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
@@ -169,6 +169,7 @@
 	ulong ref;
 	struct work_struct work;/* disk create work struct */
 	struct gendisk *gd;
+	struct dentry *debugfs;
 	struct request_queue *blkq;
 	struct hd_geometry geo;
 	sector_t ssize;
@@ -206,6 +207,7 @@
 int aoeblk_init(void);
 void aoeblk_exit(void);
 void aoeblk_gdalloc(void *);
+void aoedisk_rm_debugfs(struct aoedev *d);
 void aoedisk_rm_sysfs(struct aoedev *d);
 
 int aoechr_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 916d9ed..dd73e1f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
 /*
  * aoeblk.c
  * block device routines
@@ -17,11 +17,13 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/moduleparam.h>
+#include <linux/debugfs.h>
 #include <scsi/sg.h>
 #include "aoe.h"
 
 static DEFINE_MUTEX(aoeblk_mutex);
 static struct kmem_cache *buf_pool_cache;
+static struct dentry *aoe_debugfs_dir;
 
 /* GPFS needs a larger value than the default. */
 static int aoe_maxsectors;
@@ -108,6 +110,55 @@
 	return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
 }
 
+static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+{
+	struct aoedev *d;
+	struct aoetgt **t, **te;
+	struct aoeif *ifp, *ife;
+	unsigned long flags;
+	char c;
+
+	d = s->private;
+	seq_printf(s, "rttavg: %d rttdev: %d\n",
+		d->rttavg >> RTTSCALE,
+		d->rttdev >> RTTDSCALE);
+	seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
+	seq_printf(s, "kicked: %ld\n", d->kicked);
+	seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
+	seq_printf(s, "ref: %ld\n", d->ref);
+
+	spin_lock_irqsave(&d->lock, flags);
+	t = d->targets;
+	te = t + d->ntargets;
+	for (; t < te && *t; t++) {
+		c = '\t';
+		seq_printf(s, "falloc: %ld\n", (*t)->falloc);
+		seq_printf(s, "ffree: %p\n",
+			list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
+		seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
+			(*t)->maxout, (*t)->nframes);
+		seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
+		seq_printf(s, "\ttaint:%d\n", (*t)->taint);
+		seq_printf(s, "\tr:%d\n", (*t)->rpkts);
+		seq_printf(s, "\tw:%d\n", (*t)->wpkts);
+		ifp = (*t)->ifs;
+		ife = ifp + ARRAY_SIZE((*t)->ifs);
+		for (; ifp->nd && ifp < ife; ifp++) {
+			seq_printf(s, "%c%s", c, ifp->nd->name);
+			c = ',';
+		}
+		seq_puts(s, "\n");
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	return 0;
+}
+
+static int aoe_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, aoedisk_debugfs_show, inode->i_private);
+}
+
 static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
 static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
 static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
@@ -130,6 +181,44 @@
 	.attrs = aoe_attrs,
 };
 
+static const struct file_operations aoe_debugfs_fops = {
+	.open = aoe_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void
+aoedisk_add_debugfs(struct aoedev *d)
+{
+	struct dentry *entry;
+	char *p;
+
+	if (aoe_debugfs_dir == NULL)
+		return;
+	p = strchr(d->gd->disk_name, '/');
+	if (p == NULL)
+		p = d->gd->disk_name;
+	else
+		p++;
+	BUG_ON(*p == '\0');
+	entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+				    &aoe_debugfs_fops);
+	if (IS_ERR_OR_NULL(entry)) {
+		pr_info("aoe: cannot create debugfs file for %s\n",
+			d->gd->disk_name);
+		return;
+	}
+	BUG_ON(d->debugfs);
+	d->debugfs = entry;
+}
+
+void
+aoedisk_rm_debugfs(struct aoedev *d)
+{
+	debugfs_remove(d->debugfs);
+	d->debugfs = NULL;
+}
+
 static int
 aoedisk_add_sysfs(struct aoedev *d)
 {
@@ -330,6 +419,7 @@
 
 	add_disk(gd);
 	aoedisk_add_sysfs(d);
+	aoedisk_add_debugfs(d);
 
 	spin_lock_irqsave(&d->lock, flags);
 	WARN_ON(!(d->flags & DEVFL_GD_NOW));
@@ -351,6 +441,8 @@
 void
 aoeblk_exit(void)
 {
+	debugfs_remove_recursive(aoe_debugfs_dir);
+	aoe_debugfs_dir = NULL;
 	kmem_cache_destroy(buf_pool_cache);
 }
 
@@ -362,7 +454,11 @@
 					   0, 0, NULL);
 	if (buf_pool_cache == NULL)
 		return -ENOMEM;
-
+	aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
+	if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
+		pr_info("aoe: cannot create debugfs directory\n");
+		aoe_debugfs_dir = NULL;
+	}
 	return 0;
 }
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 4d45dba..d251543 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -380,7 +380,6 @@
 {
 	struct frame *f;
 	struct buf *buf;
-	struct aoetgt *t;
 	struct sk_buff *skb;
 	struct sk_buff_head queue;
 	ulong bcnt, fbcnt;
@@ -391,7 +390,6 @@
 	f = newframe(d);
 	if (f == NULL)
 		return 0;
-	t = *d->tgt;
 	bcnt = d->maxbcnt;
 	if (bcnt == 0)
 		bcnt = DEFAULTBCNT;
@@ -485,7 +483,6 @@
 	struct sk_buff *skb;
 	struct sk_buff_head queue;
 	struct aoe_hdr *h;
-	struct aoe_atahdr *ah;
 	struct aoetgt *t;
 	char buf[128];
 	u32 n;
@@ -500,7 +497,6 @@
 		return;
 	}
 	h = (struct aoe_hdr *) skb_mac_header(skb);
-	ah = (struct aoe_atahdr *) (h+1);
 
 	if (!(f->flags & FFL_PROBE)) {
 		snprintf(buf, sizeof(buf),
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 784c92e..e774c50 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -12,6 +12,7 @@
 #include <linux/bitmap.h>
 #include <linux/kdev_t.h>
 #include <linux/moduleparam.h>
+#include <linux/string.h>
 #include "aoe.h"
 
 static void dummy_timer(ulong);
@@ -241,16 +242,12 @@
 static int
 user_req(char *s, size_t slen, struct aoedev *d)
 {
-	char *p;
+	const char *p;
 	size_t lim;
 
 	if (!d->gd)
 		return 0;
-	p = strrchr(d->gd->disk_name, '/');
-	if (!p)
-		p = d->gd->disk_name;
-	else
-		p += 1;
+	p = kbasename(d->gd->disk_name);
 	lim = sizeof(d->gd->disk_name);
 	lim -= p - d->gd->disk_name;
 	if (slen < lim)
@@ -278,6 +275,7 @@
 
 	del_timer_sync(&d->timer);
 	if (d->gd) {
+		aoedisk_rm_debugfs(d);
 		aoedisk_rm_sysfs(d);
 		del_gendisk(d->gd);
 		put_disk(d->gd);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2c..d2d95ff 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4258,6 +4258,13 @@
 	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
 	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
 	/*
+	 * The P600 may exhibit poor performance under some workloads
+	 * if we use the value in the configuration table. Limit this
+	 * controller to MAXSGENTRIES (32) instead.
+	 */
+	if (h->board_id == 0x3225103C)
+		h->maxsgentries = MAXSGENTRIES;
+	/*
 	 * Limit in-command s/g elements to 32 to save dma'able memory.
 	 * However, the spec says if 0, use 31.
 	 */
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index a56cfcd..77a60be 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -636,7 +636,7 @@
 		mg_request(host->breq);
 }
 
-void mg_times_out(unsigned long data)
+static void mg_times_out(unsigned long data)
 {
 	struct mg_host *host = (struct mg_host *)data;
 	char *name;
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1bbc681..79aa179 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -598,7 +598,7 @@
 	unsigned long ul;
 	struct list_head *tmp;
 
-	rc = strict_strtoul(buf, 10, &ul);
+	rc = kstrtoul(buf, 10, &ul);
 	if (rc)
 		return rc;
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5d0ea1..5618847 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -44,6 +44,8 @@
  *
  *************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pktcdvd.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -69,23 +71,24 @@
 
 #define DRIVER_NAME	"pktcdvd"
 
-#if PACKET_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define DPRINTK(fmt, args...)
-#endif
+#define pkt_err(pd, fmt, ...)						\
+	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...)					\
+	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...)						\
+	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
 
-#if PACKET_DEBUG > 1
-#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define VPRINTK(fmt, args...)
-#endif
+#define pkt_dbg(level, pd, fmt, ...)					\
+do {									\
+	if (level == 2 && PACKET_DEBUG >= 2)				\
+		pr_notice("%s: %s():" fmt,				\
+			  pd->name, __func__, ##__VA_ARGS__);		\
+	else if (level == 1 && PACKET_DEBUG >= 1)			\
+		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
+} while (0)
 
 #define MAX_SPEED 0xffff
 
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
-			~(sector_t)((pd)->settings.size - 1))
-
 static DEFINE_MUTEX(pktcdvd_mutex);
 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
 static struct proc_dir_entry *pkt_proc;
@@ -103,7 +106,10 @@
 static int pkt_remove_dev(dev_t pkt_dev);
 static int pkt_seq_show(struct seq_file *m, void *p);
 
-
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
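The mask trick in get_zone() assumes pd->settings.size is a power of two; for example, with size == 128 sectors and offset == 0:

	get_zone(1000, pd);	/* 1000 & ~(sector_t)127 == 896; bios for
				 * sectors 896..1023 all share zone 896 */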
 
 /*
  * create and register a pktcdvd kernel object.
@@ -424,7 +430,7 @@
 	if (ret) {
 		kfree(class_pktcdvd);
 		class_pktcdvd = NULL;
-		printk(DRIVER_NAME": failed to create class pktcdvd\n");
+		pr_err("failed to create class pktcdvd\n");
 		return ret;
 	}
 	return 0;
@@ -517,7 +523,7 @@
 {
 	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
 	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
-		VPRINTK(DRIVER_NAME": queue empty\n");
+		pkt_dbg(2, pd, "queue empty\n");
 		atomic_set(&pd->iosched.attention, 1);
 		wake_up(&pd->wqueue);
 	}
@@ -734,36 +740,33 @@
 	return ret;
 }
 
+static const char *sense_key_string(__u8 index)
+{
+	static const char * const info[] = {
+		"No sense", "Recovered error", "Not ready",
+		"Medium error", "Hardware error", "Illegal request",
+		"Unit attention", "Data protect", "Blank check",
+	};
+
+	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
 /*
  * A generic sense dump / resolve mechanism should be implemented across
  * all ATAPI + SCSI devices.
  */
-static void pkt_dump_sense(struct packet_command *cgc)
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+			   struct packet_command *cgc)
 {
-	static char *info[9] = { "No sense", "Recovered error", "Not ready",
-				 "Medium error", "Hardware error", "Illegal request",
-				 "Unit attention", "Data protect", "Blank check" };
-	int i;
 	struct request_sense *sense = cgc->sense;
 
-	printk(DRIVER_NAME":");
-	for (i = 0; i < CDROM_PACKET_SIZE; i++)
-		printk(" %02x", cgc->cmd[i]);
-	printk(" - ");
-
-	if (sense == NULL) {
-		printk("no sense\n");
-		return;
-	}
-
-	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
-
-	if (sense->sense_key > 8) {
-		printk(" (INVALID)\n");
-		return;
-	}
-
-	printk(" (%s)\n", info[sense->sense_key]);
+	if (sense)
+		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+			CDROM_PACKET_SIZE, cgc->cmd,
+			sense->sense_key, sense->asc, sense->ascq,
+			sense_key_string(sense->sense_key));
+	else
+		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
 }
 
 /*
@@ -806,7 +809,7 @@
 	cgc.cmd[5] = write_speed & 0xff;
 
 	if ((ret = pkt_generic_packet(pd, &cgc)))
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 
 	return ret;
 }
@@ -872,7 +875,7 @@
 				need_write_seek = 0;
 			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
-					VPRINTK(DRIVER_NAME": write, waiting\n");
+					pkt_dbg(2, pd, "write, waiting\n");
 					break;
 				}
 				pkt_flush_cache(pd);
@@ -881,7 +884,7 @@
 		} else {
 			if (!reads_queued && writes_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
-					VPRINTK(DRIVER_NAME": read, waiting\n");
+					pkt_dbg(2, pd, "read, waiting\n");
 					break;
 				}
 				pd->iosched.writing = 1;
@@ -943,7 +946,7 @@
 		set_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
 	} else {
-		printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
+		pkt_err(pd, "cdrom max_phys_segments too small\n");
 		return -EIO;
 	}
 }
@@ -987,8 +990,9 @@
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
-		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+		bio, (unsigned long long)pkt->sector,
+		(unsigned long long)bio->bi_sector, err);
 
 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1005,7 +1009,7 @@
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
 
 	pd->stats.pkt_ended++;
 
@@ -1047,7 +1051,7 @@
 	spin_unlock(&pkt->lock);
 
 	if (pkt->cache_valid) {
-		VPRINTK("pkt_gather_data: zone %llx cached\n",
+		pkt_dbg(2, pd, "zone %llx cached\n",
 			(unsigned long long)pkt->sector);
 		goto out_account;
 	}
@@ -1070,7 +1074,7 @@
 
 		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
 		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
 			f, pkt->pages[p], offset);
 		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
 			BUG();
@@ -1082,7 +1086,7 @@
 	}
 
 out_account:
-	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
 		frames_read, (unsigned long long)pkt->sector);
 	pd->stats.pkt_started++;
 	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
@@ -1183,7 +1187,8 @@
 		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
 	};
 	enum packet_data_state old_state = pkt->state;
-	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+		pkt->id, (unsigned long long)pkt->sector,
 		state_name[old_state], state_name[state]);
 #endif
 	pkt->state = state;
@@ -1202,12 +1207,10 @@
 	struct rb_node *n;
 	int wakeup;
 
-	VPRINTK("handle_queue\n");
-
 	atomic_set(&pd->scan_queue, 0);
 
 	if (list_empty(&pd->cdrw.pkt_free_list)) {
-		VPRINTK("handle_queue: no pkt\n");
+		pkt_dbg(2, pd, "no pkt\n");
 		return 0;
 	}
 
@@ -1224,7 +1227,7 @@
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = ZONE(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1244,7 +1247,7 @@
 	}
 	spin_unlock(&pd->lock);
 	if (!bio) {
-		VPRINTK("handle_queue: no bio\n");
+		pkt_dbg(2, pd, "no bio\n");
 		return 0;
 	}
 
@@ -1260,12 +1263,12 @@
 	 * to this packet.
 	 */
 	spin_lock(&pd->lock);
-	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		VPRINTK("pkt_handle_queue: found zone=%llx\n",
-			(unsigned long long)ZONE(bio->bi_sector, pd));
-		if (ZONE(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n",
+			(unsigned long long)get_zone(bio->bi_sector, pd));
+		if (get_zone(bio->bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
@@ -1316,7 +1319,7 @@
 		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
 			BUG();
 	}
-	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
+	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
 
 	/*
 	 * Fill-in bvec with data from orig_bios.
@@ -1327,7 +1330,7 @@
 	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
 	spin_unlock(&pkt->lock);
 
-	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
 		pkt->write_size, (unsigned long long)pkt->sector);
 
 	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
@@ -1359,7 +1362,7 @@
 {
 	int uptodate;
 
-	VPRINTK("run_state_machine: pkt %d\n", pkt->id);
+	pkt_dbg(2, pd, "pkt %d\n", pkt->id);
 
 	for (;;) {
 		switch (pkt->state) {
@@ -1398,7 +1401,7 @@
 			if (pkt_start_recovery(pkt)) {
 				pkt_start_write(pd, pkt);
 			} else {
-				VPRINTK("No recovery possible\n");
+				pkt_dbg(2, pd, "No recovery possible\n");
 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
 			}
 			break;
@@ -1419,8 +1422,6 @@
 {
 	struct packet_data *pkt, *next;
 
-	VPRINTK("pkt_handle_packets\n");
-
 	/*
 	 * Run state machine for active packets
 	 */
@@ -1502,9 +1503,9 @@
 			if (PACKET_DEBUG > 1) {
 				int states[PACKET_NUM_STATES];
 				pkt_count_states(pd, states);
-				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
-					states[0], states[1], states[2], states[3],
-					states[4], states[5]);
+				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+					states[0], states[1], states[2],
+					states[3], states[4], states[5]);
 			}
 
 			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
@@ -1513,9 +1514,9 @@
 					min_sleep_time = pkt->sleep_time;
 			}
 
-			VPRINTK("kcdrwd: sleeping\n");
+			pkt_dbg(2, pd, "sleeping\n");
 			residue = schedule_timeout(min_sleep_time);
-			VPRINTK("kcdrwd: wake up\n");
+			pkt_dbg(2, pd, "wake up\n");
 
 			/* make swsusp happy with our thread */
 			try_to_freeze();
@@ -1563,9 +1564,10 @@
 
 static void pkt_print_settings(struct pktcdvd_device *pd)
 {
-	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
-	printk("%u blocks, ", pd->settings.size >> 2);
-	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+		 pd->settings.fp ? "Fixed" : "Variable",
+		 pd->settings.size >> 2,
+		 pd->settings.block_mode == 8 ? '1' : '2');
 }
 
 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
@@ -1699,7 +1701,7 @@
 	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
 	cgc.sense = &sense;
 	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
@@ -1714,7 +1716,7 @@
 	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
 	cgc.sense = &sense;
 	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
@@ -1749,14 +1751,14 @@
 		/*
 		 * paranoia
 		 */
-		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
+		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
 		return 1;
 	}
 	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
 
 	cgc.buflen = cgc.cmd[8] = size;
 	if ((ret = pkt_mode_select(pd, &cgc))) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
@@ -1793,7 +1795,7 @@
 	if (ti->rt == 1 && ti->blank == 0)
 		return 1;
 
-	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
 	return 0;
 }
 
@@ -1811,7 +1813,8 @@
 		case 0x12: /* DVD-RAM */
 			return 1;
 		default:
-			VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
+			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+				pd->mmc3_profile);
 			return 0;
 	}
 
@@ -1820,22 +1823,22 @@
 	 * but i'm not sure, should we leave this to user apps? probably.
 	 */
 	if (di->disc_type == 0xff) {
-		printk(DRIVER_NAME": Unknown disc. No track?\n");
+		pkt_notice(pd, "unknown disc - no track?\n");
 		return 0;
 	}
 
 	if (di->disc_type != 0x20 && di->disc_type != 0) {
-		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
+		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
 		return 0;
 	}
 
 	if (di->erasable == 0) {
-		printk(DRIVER_NAME": Disc not erasable\n");
+		pkt_notice(pd, "disc not erasable\n");
 		return 0;
 	}
 
 	if (di->border_status == PACKET_SESSION_RESERVED) {
-		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
+		pkt_err(pd, "can't write to last track (reserved)\n");
 		return 0;
 	}
 
@@ -1860,7 +1863,7 @@
 	memset(&ti, 0, sizeof(track_information));
 
 	if ((ret = pkt_get_disc_info(pd, &di))) {
-		printk("failed get_disc\n");
+		pkt_err(pd, "failed get_disc\n");
 		return ret;
 	}
 
@@ -1871,12 +1874,12 @@
 
 	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
 	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
-		printk(DRIVER_NAME": failed get_track\n");
+		pkt_err(pd, "failed get_track\n");
 		return ret;
 	}
 
 	if (!pkt_writable_track(pd, &ti)) {
-		printk(DRIVER_NAME": can't write to this track\n");
+		pkt_err(pd, "can't write to this track\n");
 		return -EROFS;
 	}
 
@@ -1886,11 +1889,11 @@
 	 */
 	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
 	if (pd->settings.size == 0) {
-		printk(DRIVER_NAME": detected zero packet size!\n");
+		pkt_notice(pd, "detected zero packet size!\n");
 		return -ENXIO;
 	}
 	if (pd->settings.size > PACKET_MAX_SECTORS) {
-		printk(DRIVER_NAME": packet size is too big\n");
+		pkt_err(pd, "packet size is too big\n");
 		return -EROFS;
 	}
 	pd->settings.fp = ti.fp;
@@ -1932,7 +1935,7 @@
 			pd->settings.block_mode = PACKET_BLOCK_MODE2;
 			break;
 		default:
-			printk(DRIVER_NAME": unknown data mode\n");
+			pkt_err(pd, "unknown data mode\n");
 			return -EROFS;
 	}
 	return 0;
@@ -1966,10 +1969,10 @@
 	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
 	ret = pkt_mode_select(pd, &cgc);
 	if (ret) {
-		printk(DRIVER_NAME": write caching control failed\n");
-		pkt_dump_sense(&cgc);
+		pkt_err(pd, "write caching control failed\n");
+		pkt_dump_sense(pd, &cgc);
 	} else if (!ret && set)
-		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
+		pkt_notice(pd, "enabled write caching\n");
 	return ret;
 }
 
@@ -2005,7 +2008,7 @@
 			     sizeof(struct mode_page_header);
 		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
 		if (ret) {
-			pkt_dump_sense(&cgc);
+			pkt_dump_sense(pd, &cgc);
 			return ret;
 		}
 	}
@@ -2064,7 +2067,7 @@
 	cgc.cmd[8] = 2;
 	ret = pkt_generic_packet(pd, &cgc);
 	if (ret) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
@@ -2079,16 +2082,16 @@
 	cgc.cmd[8] = size;
 	ret = pkt_generic_packet(pd, &cgc);
 	if (ret) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
 	if (!(buf[6] & 0x40)) {
-		printk(DRIVER_NAME": Disc type is not CD-RW\n");
+		pkt_notice(pd, "disc type is not CD-RW\n");
 		return 1;
 	}
 	if (!(buf[6] & 0x4)) {
-		printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
+		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
 		return 1;
 	}
 
@@ -2108,14 +2111,14 @@
 			*speed = us_clv_to_speed[sp];
 			break;
 		default:
-			printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
+			pkt_notice(pd, "unknown disc sub-type %d\n", st);
 			return 1;
 	}
 	if (*speed) {
-		printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
+		pkt_info(pd, "maximum media speed: %d\n", *speed);
 		return 0;
 	} else {
-		printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
+		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
 		return 1;
 	}
 }
@@ -2126,7 +2129,7 @@
 	struct request_sense sense;
 	int ret;
 
-	VPRINTK(DRIVER_NAME": Performing OPC\n");
+	pkt_dbg(2, pd, "Performing OPC\n");
 
 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
 	cgc.sense = &sense;
@@ -2134,7 +2137,7 @@
 	cgc.cmd[0] = GPCMD_SEND_OPC;
 	cgc.cmd[1] = 1;
 	if ((ret = pkt_generic_packet(pd, &cgc)))
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 	return ret;
 }
 
@@ -2144,12 +2147,12 @@
 	unsigned int write_speed, media_write_speed, read_speed;
 
 	if ((ret = pkt_probe_settings(pd))) {
-		VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
+		pkt_dbg(2, pd, "failed probe\n");
 		return ret;
 	}
 
 	if ((ret = pkt_set_write_settings(pd))) {
-		DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
+		pkt_dbg(1, pd, "failed saving write settings\n");
 		return -EIO;
 	}
 
@@ -2161,26 +2164,26 @@
 		case 0x13: /* DVD-RW */
 		case 0x1a: /* DVD+RW */
 		case 0x12: /* DVD-RAM */
-			DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
+			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
 			break;
 		default:
 			if ((ret = pkt_media_speed(pd, &media_write_speed)))
 				media_write_speed = 16;
 			write_speed = min(write_speed, media_write_speed * 177);
-			DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
+			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
 			break;
 	}
 	read_speed = write_speed;
 
 	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
-		DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
+		pkt_dbg(1, pd, "couldn't set write speed\n");
 		return -EIO;
 	}
 	pd->write_speed = write_speed;
 	pd->read_speed = read_speed;
 
 	if ((ret = pkt_perform_opc(pd))) {
-		DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
+		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
 	}
 
 	return 0;
@@ -2205,7 +2208,7 @@
 		goto out;
 
 	if ((ret = pkt_get_last_written(pd, &lba))) {
-		printk(DRIVER_NAME": pkt_get_last_written failed\n");
+		pkt_err(pd, "pkt_get_last_written failed\n");
 		goto out_putdev;
 	}
 
@@ -2235,11 +2238,11 @@
 
 	if (write) {
 		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
-			printk(DRIVER_NAME": not enough memory for buffers\n");
+			pkt_err(pd, "not enough memory for buffers\n");
 			ret = -ENOMEM;
 			goto out_putdev;
 		}
-		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
+		pkt_info(pd, "%lukB available on disc\n", lba << 1);
 	}
 
 	return 0;
@@ -2257,7 +2260,7 @@
 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
 {
 	if (flush && pkt_flush_cache(pd))
-		DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
+		pkt_dbg(1, pd, "not flushing cache\n");
 
 	pkt_lock_door(pd, 0);
 
@@ -2279,8 +2282,6 @@
 	struct pktcdvd_device *pd = NULL;
 	int ret;
 
-	VPRINTK(DRIVER_NAME": entering open\n");
-
 	mutex_lock(&pktcdvd_mutex);
 	mutex_lock(&ctl_mutex);
 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
@@ -2315,7 +2316,6 @@
 out_dec:
 	pd->refcnt--;
 out:
-	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
 	mutex_unlock(&ctl_mutex);
 	mutex_unlock(&pktcdvd_mutex);
 	return ret;
@@ -2360,7 +2360,8 @@
 
 	pd = q->queuedata;
 	if (!pd) {
-		printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
+		pr_err("%s incorrect request queue\n",
+		       bdevname(bio->bi_bdev, b));
 		goto end_io;
 	}
 
@@ -2382,20 +2383,20 @@
 	}
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-		printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
-			pd->name, (unsigned long long)bio->bi_sector);
+		pkt_notice(pd, "WRITE for ro device (%llu)\n",
+			   (unsigned long long)bio->bi_sector);
 		goto end_io;
 	}
 
 	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-		printk(DRIVER_NAME": wrong bio size\n");
+		pkt_err(pd, "wrong bio size\n");
 		goto end_io;
 	}
 
 	blk_queue_bounce(q, &bio);
 
-	zone = ZONE(bio->bi_sector, pd);
-	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
+	zone = get_zone(bio->bi_sector, pd);
+	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
 		(unsigned long long)bio->bi_sector,
 		(unsigned long long)bio_end_sector(bio));
 
@@ -2405,7 +2406,7 @@
 		sector_t last_zone;
 		int first_sectors;
 
-		last_zone = ZONE(bio_end_sector(bio) - 1, pd);
+		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
 		if (last_zone != zone) {
 			BUG_ON(last_zone != zone + pd->settings.size);
 			first_sectors = last_zone - bio->bi_sector;
@@ -2500,7 +2501,7 @@
 			  struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
-	sector_t zone = ZONE(bmd->bi_sector, pd);
+	sector_t zone = get_zone(bmd->bi_sector, pd);
 	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
 	int remaining = (pd->settings.size << 9) - used;
 	int remaining2;
@@ -2609,7 +2610,7 @@
 	struct block_device *bdev;
 
 	if (pd->pkt_dev == dev) {
-		printk(DRIVER_NAME": Recursive setup not allowed\n");
+		pkt_err(pd, "recursive setup not allowed\n");
 		return -EBUSY;
 	}
 	for (i = 0; i < MAX_WRITERS; i++) {
@@ -2617,11 +2618,12 @@
 		if (!pd2)
 			continue;
 		if (pd2->bdev->bd_dev == dev) {
-			printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
+			pkt_err(pd, "%s already setup\n",
+				bdevname(pd2->bdev, b));
 			return -EBUSY;
 		}
 		if (pd2->pkt_dev == dev) {
-			printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
+			pkt_err(pd, "can't chain pktcdvd devices\n");
 			return -EBUSY;
 		}
 	}
@@ -2644,13 +2646,13 @@
 	atomic_set(&pd->cdrw.pending_bios, 0);
 	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
 	if (IS_ERR(pd->cdrw.thread)) {
-		printk(DRIVER_NAME": can't start kernel thread\n");
+		pkt_err(pd, "can't start kernel thread\n");
 		ret = -ENOMEM;
 		goto out_mem;
 	}
 
 	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
-	DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
 	return 0;
 
 out_mem:
@@ -2665,8 +2667,8 @@
 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
 	int ret;
 
-	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
-		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 
 	mutex_lock(&pktcdvd_mutex);
 	switch (cmd) {
@@ -2690,7 +2692,7 @@
 		break;
 
 	default:
-		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
+		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
 		ret = -ENOTTY;
 	}
 	mutex_unlock(&pktcdvd_mutex);
@@ -2743,7 +2745,7 @@
 		if (!pkt_devs[idx])
 			break;
 	if (idx == MAX_WRITERS) {
-		printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
+		pr_err("max %d writers supported\n", MAX_WRITERS);
 		ret = -EBUSY;
 		goto out_mutex;
 	}
@@ -2818,7 +2820,7 @@
 	kfree(pd);
 out_mutex:
 	mutex_unlock(&ctl_mutex);
-	printk(DRIVER_NAME": setup of pktcdvd device failed\n");
+	pr_err("setup of pktcdvd device failed\n");
 	return ret;
 }
 
@@ -2839,7 +2841,7 @@
 			break;
 	}
 	if (idx == MAX_WRITERS) {
-		DPRINTK(DRIVER_NAME": dev not setup\n");
+		pr_debug("dev not setup\n");
 		ret = -ENXIO;
 		goto out;
 	}
@@ -2859,7 +2861,7 @@
 	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
 
 	remove_proc_entry(pd->name, pkt_proc);
-	DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
+	pkt_dbg(1, pd, "writer unmapped\n");
 
 	del_gendisk(pd->disk);
 	blk_cleanup_queue(pd->disk->queue);
@@ -2969,7 +2971,7 @@
 
 	ret = register_blkdev(pktdev_major, DRIVER_NAME);
 	if (ret < 0) {
-		printk(DRIVER_NAME": Unable to register block device\n");
+		pr_err("unable to register block device\n");
 		goto out2;
 	}
 	if (!pktdev_major)
@@ -2983,7 +2985,7 @@
 
 	ret = misc_register(&pkt_misc);
 	if (ret) {
-		printk(DRIVER_NAME": Unable to register misc device\n");
+		pr_err("unable to register misc device\n");
 		goto out_misc;
 	}
 
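[Note: the pkt_err()/pkt_notice()/pkt_info()/pkt_dbg() helpers used throughout the conversion above are defined outside these hunks. A minimal sketch of the pattern, assuming pd->name carries the device name and PACKET_DEBUG the compile-time verbosity; the real definitions may differ in detail:

	/* hypothetical sketch of the message helpers the hunks above rely on */
	#define pkt_err(pd, fmt, ...) \
		pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
	#define pkt_notice(pd, fmt, ...) \
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
	#define pkt_info(pd, fmt, ...) \
		pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
	#define pkt_dbg(level, pd, fmt, ...)				\
	do {								\
		if (level <= PACKET_DEBUG)				\
			pr_notice("%s: %s(): " fmt,			\
				  pd->name, __func__, ##__VA_ARGS__);	\
	} while (0)

This is why the converted call sites can drop the hand-written "pkt_gather_data:"-style prefixes: the function name comes from __func__ and the device name from pd.]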
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 39c51cc..b22a7d0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5132,7 +5132,7 @@
 	bool already = false;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &ul);
+	ret = kstrtoul(buf, 10, &ul);
 	if (ret)
 		return ret;
 
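[Note: strict_strtoul() was a long-deprecated wrapper; kstrtoul() takes the same (string, base, result) arguments, rejects trailing garbage, and returns 0 or a negative errno. Minimal usage sketch:

	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);	/* base 10; 0 on success */
	if (err)
		return err;
]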
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8ed6ccb..b02d53a 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -924,7 +924,6 @@
 	return 0;
 
 out_kfree:
-	platform_set_drvdata(dev, NULL);
 	kfree(swd);
 out_iounmap:
 	iounmap(swim_base);
@@ -962,7 +961,6 @@
 	if (res)
 		release_mem_region(res->start, resource_size(res));
 
-	platform_set_drvdata(dev, NULL);
 	kfree(swd);
 
 	return 0;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index fe5c3cd..c2014a0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -620,7 +620,7 @@
 	}
 
 	/* Front end dir is a number, which is used as the handle. */
-	err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
 	if (err)
 		return;
 
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 4519cb3..5796d01 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -766,6 +766,25 @@
 }
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+static int tpm_tis_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	int ret;
+
+	if (chip->vendor.irq)
+		tpm_tis_reenable_interrupts(chip);
+
+	ret = tpm_pm_resume(dev);
+	if (!ret)
+		tpm_do_selftest(chip);
+
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
 #ifdef CONFIG_PNP
 static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 				      const struct pnp_device_id *pnp_id)
@@ -787,26 +806,6 @@
 	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }
 
-static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
-{
-	return tpm_pm_suspend(&dev->dev);
-}
-
-static int tpm_tis_pnp_resume(struct pnp_dev *dev)
-{
-	struct tpm_chip *chip = pnp_get_drvdata(dev);
-	int ret;
-
-	if (chip->vendor.irq)
-		tpm_tis_reenable_interrupts(chip);
-
-	ret = tpm_pm_resume(&dev->dev);
-	if (!ret)
-		tpm_do_selftest(chip);
-
-	return ret;
-}
-
 static struct pnp_device_id tpm_pnp_tbl[] = {
 	{"PNP0C31", 0},		/* TPM */
 	{"ATM1200", 0},		/* Atmel */
@@ -835,9 +834,12 @@
 	.name = "tpm_tis",
 	.id_table = tpm_pnp_tbl,
 	.probe = tpm_tis_pnp_init,
-	.suspend = tpm_tis_pnp_suspend,
-	.resume = tpm_tis_pnp_resume,
 	.remove = tpm_tis_pnp_remove,
+#ifdef CONFIG_PM_SLEEP
+	.driver	= {
+		.pm = &tpm_tis_pm,
+	},
+#endif
 };
 
 #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
@@ -846,20 +848,6 @@
 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 #endif
 
-#ifdef CONFIG_PM_SLEEP
-static int tpm_tis_resume(struct device *dev)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-
-	if (chip->vendor.irq)
-		tpm_tis_reenable_interrupts(chip);
-
-	return tpm_pm_resume(dev);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
-
 static struct platform_driver tis_drv = {
 	.driver = {
 		.name = "tpm_tis",
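[Note: moving from the legacy pnp_driver .suspend/.resume hooks to dev_pm_ops lets one definition serve both the PNP and platform drivers. Roughly, SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume) expands to the following (simplified; the real macro also covers the hibernation transitions):

	static const struct dev_pm_ops tpm_tis_pm = {
		.suspend  = tpm_pm_suspend,
		.resume   = tpm_tis_resume,
		.freeze   = tpm_pm_suspend,
		.thaw     = tpm_tis_resume,
		.poweroff = tpm_pm_suspend,
		.restore  = tpm_tis_resume,
	};

As a side effect, the unified tpm_tis_resume() now also runs tpm_do_selftest(), which previously happened only in the PNP resume path.]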
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 51380d6..279407a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -27,7 +27,7 @@
 	bool "DebugFS representation of clock tree"
 	select DEBUG_FS
 	---help---
-	  Creates a directory hierchy in debugfs for visualizing the clk
+	  Creates a directory hierarchy in debugfs for visualizing the clk
 	  tree structure.  Each directory contains read-only members
 	  that export information specific to that clk node: clk_rate,
 	  clk_flags, clk_prepare_count, clk_enable_count &
@@ -64,6 +64,12 @@
 	  This driver supports Silicon Labs 5351A/B/C programmable clock
 	  generators.
 
+config COMMON_CLK_S2MPS11
+	tristate "Clock driver for S2MPS11 MFD"
+	depends on MFD_SEC_CORE
+	---help---
+	  This driver supports S2MPS11 crystal oscillator clock.
+
 config CLK_TWL6040
 	tristate "External McPDM functional clock from twl6040"
 	depends on TWL6040_CORE
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 4038c2b..7b11106 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -40,5 +40,6 @@
 obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
 obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
 obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
 obj-$(CONFIG_CLK_TWL6040)	+= clk-twl6040.o
 obj-$(CONFIG_CLK_PPC_CORENET)	+= clk-ppc-corenet.o
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 792bc57..5fb4ff5 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -23,7 +23,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 
-static const __initconst struct of_device_id clk_match[] = {
+static const struct of_device_id clk_match[] __initconst = {
 	{ .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
 	{ }
 };
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 6d55eb2..8d3009e 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -104,7 +104,7 @@
 	struct clk_divider *divider = to_clk_divider(hw);
 	unsigned int div, val;
 
-	val = readl(divider->reg) >> divider->shift;
+	val = clk_readl(divider->reg) >> divider->shift;
 	val &= div_mask(divider);
 
 	div = _get_div(divider, val);
@@ -230,11 +230,11 @@
 	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
 		val = div_mask(divider) << (divider->shift + 16);
 	} else {
-		val = readl(divider->reg);
+		val = clk_readl(divider->reg);
 		val &= ~(div_mask(divider) << divider->shift);
 	}
 	val |= value << divider->shift;
-	writel(val, divider->reg);
+	clk_writel(val, divider->reg);
 
 	if (divider->lock)
 		spin_unlock_irqrestore(divider->lock, flags);
@@ -317,6 +317,7 @@
 	return _register_divider(dev, name, parent_name, flags, reg, shift,
 			width, clk_divider_flags, NULL, lock);
 }
+EXPORT_SYMBOL_GPL(clk_register_divider);
 
 /**
  * clk_register_divider_table - register a table based divider clock with
@@ -341,3 +342,4 @@
 	return _register_divider(dev, name, parent_name, flags, reg, shift,
 			width, clk_divider_flags, table, lock);
 }
+EXPORT_SYMBOL_GPL(clk_register_divider_table);
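[Note: clk_readl()/clk_writel() are thin MMIO accessors provided by the common framework so the basic clock types can be reused on platforms whose clock registers are not native-endian. A minimal sketch of the idea, with a hypothetical config symbol standing in for the real selection mechanism:

	static inline u32 clk_readl(u32 __iomem *reg)
	{
	#if defined(CONFIG_CLK_BIG_ENDIAN)	/* hypothetical switch */
		return ioread32be(reg);
	#else
		return readl(reg);
	#endif
	}

On most platforms they collapse to plain readl()/writel(), so the divider/gate/mux conversions here are behaviour-neutral there.]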
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 9ff7d51..0e1d89b 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -97,6 +97,8 @@
 
 	return clk;
 }
+EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
+
 #ifdef CONFIG_OF
 /**
  * of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index dc58fbd..1ed591a 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -80,6 +80,7 @@
 
 	return clk;
 }
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
 
 #ifdef CONFIG_OF
 /**
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 790306e..4a58c55 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -58,7 +58,7 @@
 		if (set)
 			reg |= BIT(gate->bit_idx);
 	} else {
-		reg = readl(gate->reg);
+		reg = clk_readl(gate->reg);
 
 		if (set)
 			reg |= BIT(gate->bit_idx);
@@ -66,7 +66,7 @@
 			reg &= ~BIT(gate->bit_idx);
 	}
 
-	writel(reg, gate->reg);
+	clk_writel(reg, gate->reg);
 
 	if (gate->lock)
 		spin_unlock_irqrestore(gate->lock, flags);
@@ -89,7 +89,7 @@
 	u32 reg;
 	struct clk_gate *gate = to_clk_gate(hw);
 
-	reg = readl(gate->reg);
+	reg = clk_readl(gate->reg);
 
 	/* if a set bit disables this clk, flip it before masking */
 	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
@@ -161,3 +161,4 @@
 
 	return clk;
 }
+EXPORT_SYMBOL_GPL(clk_register_gate);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 614444c..4f96ff3 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -42,7 +42,7 @@
 	 * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
 	 * val = 0x4 really means "bit 2, index starts at bit 0"
 	 */
-	val = readl(mux->reg) >> mux->shift;
+	val = clk_readl(mux->reg) >> mux->shift;
 	val &= mux->mask;
 
 	if (mux->table) {
@@ -89,11 +89,11 @@
 	if (mux->flags & CLK_MUX_HIWORD_MASK) {
 		val = mux->mask << (mux->shift + 16);
 	} else {
-		val = readl(mux->reg);
+		val = clk_readl(mux->reg);
 		val &= ~(mux->mask << mux->shift);
 	}
 	val |= index << mux->shift;
-	writel(val, mux->reg);
+	clk_writel(val, mux->reg);
 
 	if (mux->lock)
 		spin_unlock_irqrestore(mux->lock, flags);
@@ -104,9 +104,15 @@
 const struct clk_ops clk_mux_ops = {
 	.get_parent = clk_mux_get_parent,
 	.set_parent = clk_mux_set_parent,
+	.determine_rate = __clk_mux_determine_rate,
 };
 EXPORT_SYMBOL_GPL(clk_mux_ops);
 
+const struct clk_ops clk_mux_ro_ops = {
+	.get_parent = clk_mux_get_parent,
+};
+EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
+
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
 		const char **parent_names, u8 num_parents, unsigned long flags,
 		void __iomem *reg, u8 shift, u32 mask,
@@ -133,7 +139,10 @@
 	}
 
 	init.name = name;
-	init.ops = &clk_mux_ops;
+	if (clk_mux_flags & CLK_MUX_READ_ONLY)
+		init.ops = &clk_mux_ro_ops;
+	else
+		init.ops = &clk_mux_ops;
 	init.flags = flags | CLK_IS_BASIC;
 	init.parent_names = parent_names;
 	init.num_parents = num_parents;
@@ -154,6 +163,7 @@
 
 	return clk;
 }
+EXPORT_SYMBOL_GPL(clk_register_mux_table);
 
 struct clk *clk_register_mux(struct device *dev, const char *name,
 		const char **parent_names, u8 num_parents, unsigned long flags,
@@ -166,3 +176,4 @@
 				      flags, reg, shift, mask, clk_mux_flags,
 				      NULL, lock);
 }
+EXPORT_SYMBOL_GPL(clk_register_mux);
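[Note: with clk_mux_ro_ops in place, a mux whose parent selection is fixed by firmware or board straps can be registered with CLK_MUX_READ_ONLY: the framework can still report the parent via .get_parent, but clk_set_parent() fails since no .set_parent is wired up, and clk_set_rate() will not reparent it either since there is no .determine_rate. A hedged usage sketch; register offset, shift and width are illustrative:

	static const char *sel_parents[] = { "osc", "pll0", "pll1" };

	/* hypothetical: 2-bit parent-select field at bit 4, set by the ROM */
	clk = clk_register_mux(NULL, "periph_sel", sel_parents,
			       ARRAY_SIZE(sel_parents), 0,
			       base + 0x10, 4, 2,
			       CLK_MUX_READ_ONLY, &lock);
]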
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6d819a3..51410c2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -479,12 +479,12 @@
 		of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-static const __initconst struct of_device_id nomadik_src_match[] = {
+static const struct of_device_id nomadik_src_match[] __initconst = {
 	{ .compatible = "stericsson,nomadik-src" },
 	{ /* sentinel */ }
 };
 
-static const __initconst struct of_device_id nomadik_src_clk_match[] = {
+static const struct of_device_id nomadik_src_clk_match[] __initconst = {
 	{
 		.compatible = "fixed-clock",
 		.data = of_fixed_clk_setup,
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index 643ca65..5ab95f1 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1034,7 +1034,7 @@
 	usb0,  usb1,  maxclk,
 };
 
-static __initdata struct clk_hw* prima2_clk_hw_array[maxclk] = {
+static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
 	NULL, /* dummy */
 	NULL,
 	&clk_pll1.hw,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
new file mode 100644
index 0000000..7be41e6
--- /dev/null
+++ b/drivers/clk/clk-s2mps11.c
@@ -0,0 +1,273 @@
+/*
+ * clk-s2mps11.c - Clock driver for S2MPS11.
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/core.h>
+
+#define s2mps11_name(a) (a->hw.init->name)
+
+static struct clk **clk_table;
+static struct clk_onecell_data clk_data;
+
+enum {
+	S2MPS11_CLK_AP = 0,
+	S2MPS11_CLK_CP,
+	S2MPS11_CLK_BT,
+	S2MPS11_CLKS_NUM,
+};
+
+struct s2mps11_clk {
+	struct sec_pmic_dev *iodev;
+	struct clk_hw hw;
+	struct clk *clk;
+	struct clk_lookup *lookup;
+	u32 mask;
+	bool enabled;
+};
+
+static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
+{
+	return container_of(hw, struct s2mps11_clk, hw);
+}
+
+static int s2mps11_clk_prepare(struct clk_hw *hw)
+{
+	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+	int ret;
+
+	ret = regmap_update_bits(s2mps11->iodev->regmap,
+				 S2MPS11_REG_RTC_CTRL,
+				 s2mps11->mask, s2mps11->mask);
+	if (!ret)
+		s2mps11->enabled = true;
+
+	return ret;
+}
+
+static void s2mps11_clk_unprepare(struct clk_hw *hw)
+{
+	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+	int ret;
+
+	ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+			   s2mps11->mask, ~s2mps11->mask);
+
+	if (!ret)
+		s2mps11->enabled = false;
+}
+
+static int s2mps11_clk_is_enabled(struct clk_hw *hw)
+{
+	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+
+	return s2mps11->enabled;
+}
+
+static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+	if (s2mps11->enabled)
+		return 32768;
+	else
+		return 0;
+}
+
+static struct clk_ops s2mps11_clk_ops = {
+	.prepare	= s2mps11_clk_prepare,
+	.unprepare	= s2mps11_clk_unprepare,
+	.is_enabled	= s2mps11_clk_is_enabled,
+	.recalc_rate	= s2mps11_clk_recalc_rate,
+};
+
+static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
+	[S2MPS11_CLK_AP] = {
+		.name = "s2mps11_ap",
+		.ops = &s2mps11_clk_ops,
+		.flags = CLK_IS_ROOT,
+	},
+	[S2MPS11_CLK_CP] = {
+		.name = "s2mps11_cp",
+		.ops = &s2mps11_clk_ops,
+		.flags = CLK_IS_ROOT,
+	},
+	[S2MPS11_CLK_BT] = {
+		.name = "s2mps11_bt",
+		.ops = &s2mps11_clk_ops,
+		.flags = CLK_IS_ROOT,
+	},
+};
+
+static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
+{
+	struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+	struct device_node *clk_np;
+	int i;
+
+	if (!iodev->dev->of_node)
+		return NULL;
+
+	clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks");
+	if (!clk_np) {
+		dev_err(&pdev->dev, "could not find clock sub-node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	clk_table = devm_kzalloc(&pdev->dev, sizeof(struct clk *) *
+				 S2MPS11_CLKS_NUM, GFP_KERNEL);
+	if (!clk_table)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+		of_property_read_string_index(clk_np, "clock-output-names", i,
+				&s2mps11_clks_init[i].name);
+
+	return clk_np;
+}
+
+static int s2mps11_clk_probe(struct platform_device *pdev)
+{
+	struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+	struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
+	struct device_node *clk_np = NULL;
+	int i, ret = 0;
+	u32 val;
+
+	s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
+					S2MPS11_CLKS_NUM, GFP_KERNEL);
+	if (!s2mps11_clks)
+		return -ENOMEM;
+
+	s2mps11_clk = s2mps11_clks;
+
+	clk_np = s2mps11_clk_parse_dt(pdev);
+	if (IS_ERR(clk_np))
+		return PTR_ERR(clk_np);
+
+	for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
+		s2mps11_clk->iodev = iodev;
+		s2mps11_clk->hw.init = &s2mps11_clks_init[i];
+		s2mps11_clk->mask = 1 << i;
+
+		ret = regmap_read(s2mps11_clk->iodev->regmap,
+				  S2MPS11_REG_RTC_CTRL, &val);
+		if (ret < 0)
+			goto err_reg;
+
+		s2mps11_clk->enabled = val & s2mps11_clk->mask;
+
+		s2mps11_clk->clk = devm_clk_register(&pdev->dev,
+							&s2mps11_clk->hw);
+		if (IS_ERR(s2mps11_clk->clk)) {
+			dev_err(&pdev->dev, "Failed to register: %s\n",
+						s2mps11_name(s2mps11_clk));
+			ret = PTR_ERR(s2mps11_clk->clk);
+			goto err_reg;
+		}
+
+		s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
+					sizeof(struct clk_lookup), GFP_KERNEL);
+		if (!s2mps11_clk->lookup) {
+			ret = -ENOMEM;
+			goto err_lup;
+		}
+
+		s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
+		s2mps11_clk->lookup->clk = s2mps11_clk->clk;
+
+		clkdev_add(s2mps11_clk->lookup);
+	}
+
+	if (clk_table) {
+		for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+			clk_table[i] = s2mps11_clks[i].clk;
+
+		clk_data.clks = clk_table;
+		clk_data.clk_num = S2MPS11_CLKS_NUM;
+		of_clk_add_provider(clk_np, of_clk_src_onecell_get, &clk_data);
+	}
+
+	platform_set_drvdata(pdev, s2mps11_clks);
+
+	return ret;
+err_lup:
+	devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+err_reg:
+	while (s2mps11_clk > s2mps11_clks) {
+		if (s2mps11_clk->lookup) {
+			clkdev_drop(s2mps11_clk->lookup);
+			devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+		}
+		s2mps11_clk--;
+	}
+
+	return ret;
+}
+
+static int s2mps11_clk_remove(struct platform_device *pdev)
+{
+	struct s2mps11_clk *s2mps11_clks = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+		clkdev_drop(s2mps11_clks[i].lookup);
+
+	return 0;
+}
+
+static const struct platform_device_id s2mps11_clk_id[] = {
+	{ "s2mps11-clk", 0},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
+
+static struct platform_driver s2mps11_clk_driver = {
+	.driver = {
+		.name  = "s2mps11-clk",
+		.owner = THIS_MODULE,
+	},
+	.probe = s2mps11_clk_probe,
+	.remove = s2mps11_clk_remove,
+	.id_table = s2mps11_clk_id,
+};
+
+static int __init s2mps11_clk_init(void)
+{
+	return platform_driver_register(&s2mps11_clk_driver);
+}
+subsys_initcall(s2mps11_clk_init);
+
+static void __init s2mps11_clk_cleanup(void)
+{
+	platform_driver_unregister(&s2mps11_clk_driver);
+}
+module_exit(s2mps11_clk_cleanup);
+
+MODULE_DESCRIPTION("S2MPS11 Clock Driver");
+MODULE_AUTHOR("Yadwinder Singh Brar <yadi.brar@samsung.com>");
+MODULE_LICENSE("GPL");
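[Note: the driver exposes three gateable 32.768 kHz outputs and registers clkdev lookups keyed by the clock names, so a non-DT consumer can do something like the sketch below, assuming the default "s2mps11_bt" name is not overridden by clock-output-names. The ops only implement .prepare/.unprepare (not .enable/.disable) because flipping the bit in S2MPS11_REG_RTC_CTRL goes over a sleeping regmap bus:

	/* hypothetical consumer of the BT 32 kHz output */
	static int bt_enable_32k(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "s2mps11_bt");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_prepare(clk);	/* may sleep; gates the output on */
	}
]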
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index 8774e05..3efbdd0 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -746,7 +746,7 @@
 	u16 clk_val;
 };
 
-struct u300_clock const __initconst u300_clk_lookup[] = {
+static struct u300_clock const u300_clk_lookup[] __initconst = {
 	{
 		.type = U300_CLK_TYPE_REST,
 		.id = 3,
@@ -1151,7 +1151,7 @@
 		of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-static const __initconst struct of_device_id u300_clk_match[] = {
+static const struct of_device_id u300_clk_match[] __initconst = {
 	{
 		.compatible = "fixed-clock",
 		.data = of_fixed_clk_setup,
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 1b3f8c9..805b4c3 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -31,7 +31,7 @@
 	bool xtal_ena;
 };
 
-static int wm831x_xtal_is_enabled(struct clk_hw *hw)
+static int wm831x_xtal_is_prepared(struct clk_hw *hw)
 {
 	struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
 						  xtal_hw);
@@ -52,7 +52,7 @@
 }
 
 static const struct clk_ops wm831x_xtal_ops = {
-	.is_enabled = wm831x_xtal_is_enabled,
+	.is_prepared = wm831x_xtal_is_prepared,
 	.recalc_rate = wm831x_xtal_recalc_rate,
 };
 
@@ -73,7 +73,7 @@
 	24576000,
 };
 
-static int wm831x_fll_is_enabled(struct clk_hw *hw)
+static int wm831x_fll_is_prepared(struct clk_hw *hw)
 {
 	struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
 						  fll_hw);
@@ -170,7 +170,7 @@
 	if (i == ARRAY_SIZE(wm831x_fll_auto_rates))
 		return -EINVAL;
 
-	if (wm831x_fll_is_enabled(hw))
+	if (wm831x_fll_is_prepared(hw))
 		return -EPERM;
 
 	return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2,
@@ -220,7 +220,7 @@
 }
 
 static const struct clk_ops wm831x_fll_ops = {
-	.is_enabled = wm831x_fll_is_enabled,
+	.is_prepared = wm831x_fll_is_prepared,
 	.prepare = wm831x_fll_prepare,
 	.unprepare = wm831x_fll_unprepare,
 	.round_rate = wm831x_fll_round_rate,
@@ -237,7 +237,7 @@
 	.flags = CLK_SET_RATE_GATE,
 };
 
-static int wm831x_clkout_is_enabled(struct clk_hw *hw)
+static int wm831x_clkout_is_prepared(struct clk_hw *hw)
 {
 	struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
 						  clkout_hw);
@@ -335,7 +335,7 @@
 }
 
 static const struct clk_ops wm831x_clkout_ops = {
-	.is_enabled = wm831x_clkout_is_enabled,
+	.is_prepared = wm831x_clkout_is_prepared,
 	.prepare = wm831x_clkout_prepare,
 	.unprepare = wm831x_clkout_unprepare,
 	.get_parent = wm831x_clkout_get_parent,
@@ -360,6 +360,8 @@
 	if (!clkdata)
 		return -ENOMEM;
 
+	clkdata->wm831x = wm831x;
+
 	/* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
 	ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
 	if (ret < 0) {
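[Note: the is_enabled -> is_prepared renames are more than cosmetic: .is_enabled may be called in atomic context with the enable lock held and must not sleep, while these callbacks read PMIC registers over a sleeping bus, so they belong on the prepare side. As a rule of thumb:

	/* hypothetical ops layout for a clock behind I2C/SPI */
	static const struct clk_ops example_ops = {
		.prepare     = example_prepare,		/* may sleep */
		.unprepare   = example_unprepare,	/* may sleep */
		.is_prepared = example_is_prepared,	/* may sleep */
		/* .enable/.disable/.is_enabled are for atomic context only */
	};

The hunk also fixes a real bug: clkdata->wm831x was never assigned, so the callbacks would have dereferenced a NULL wm831x pointer.]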
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 54a191c..a004769 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -458,7 +458,6 @@
 			clk->ops->unprepare(clk->hw);
 	}
 }
-EXPORT_SYMBOL_GPL(__clk_get_flags);
 
 /* caller must hold prepare_lock */
 static void clk_disable_unused_subtree(struct clk *clk)
@@ -559,6 +558,19 @@
 	return !clk ? NULL : clk->parent;
 }
 
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+	if (!clk || index >= clk->num_parents)
+		return NULL;
+	else if (!clk->parents)
+		return __clk_lookup(clk->parent_names[index]);
+	else if (!clk->parents[index])
+		return clk->parents[index] =
+			__clk_lookup(clk->parent_names[index]);
+	else
+		return clk->parents[index];
+}
+
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
 	return !clk ? 0 : clk->enable_count;
@@ -594,6 +606,7 @@
 {
 	return !clk ? 0 : clk->flags;
 }
+EXPORT_SYMBOL_GPL(__clk_get_flags);
 
 bool __clk_is_prepared(struct clk *clk)
 {
@@ -679,6 +692,55 @@
 	return NULL;
 }
 
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long *best_parent_rate,
+			      struct clk **best_parent_p)
+{
+	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+	int i, num_parents;
+	unsigned long parent_rate, best = 0;
+
+	/* if NO_REPARENT flag set, pass through to current parent */
+	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
+		parent = clk->parent;
+		if (clk->flags & CLK_SET_RATE_PARENT)
+			best = __clk_round_rate(parent, rate);
+		else if (parent)
+			best = __clk_get_rate(parent);
+		else
+			best = __clk_get_rate(clk);
+		goto out;
+	}
+
+	/* find the parent that can provide the fastest rate <= rate */
+	num_parents = clk->num_parents;
+	for (i = 0; i < num_parents; i++) {
+		parent = clk_get_parent_by_index(clk, i);
+		if (!parent)
+			continue;
+		if (clk->flags & CLK_SET_RATE_PARENT)
+			parent_rate = __clk_round_rate(parent, rate);
+		else
+			parent_rate = __clk_get_rate(parent);
+		if (parent_rate <= rate && parent_rate > best) {
+			best_parent = parent;
+			best = parent_rate;
+		}
+	}
+
+out:
+	if (best_parent)
+		*best_parent_p = best_parent;
+	*best_parent_rate = best;
+
+	return best;
+}
+
 /***        clk api        ***/
 
 void __clk_unprepare(struct clk *clk)
@@ -702,7 +764,7 @@
 
 /**
  * clk_unprepare - undo preparation of a clock source
- * @clk: the clk being unprepare
+ * @clk: the clk being unprepared
  *
  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
@@ -869,27 +931,31 @@
 /**
  * __clk_round_rate - round the given rate for a clk
  * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
  *
  * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
  */
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long parent_rate = 0;
+	struct clk *parent;
 
 	if (!clk)
 		return 0;
 
-	if (!clk->ops->round_rate) {
-		if (clk->flags & CLK_SET_RATE_PARENT)
-			return __clk_round_rate(clk->parent, rate);
-		else
-			return clk->rate;
-	}
+	parent = clk->parent;
+	if (parent)
+		parent_rate = parent->rate;
 
-	if (clk->parent)
-		parent_rate = clk->parent->rate;
-
-	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+	if (clk->ops->determine_rate)
+		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
+						&parent);
+	else if (clk->ops->round_rate)
+		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+	else if (clk->flags & CLK_SET_RATE_PARENT)
+		return __clk_round_rate(clk->parent, rate);
+	else
+		return clk->rate;
 }
 
 /**
@@ -956,7 +1022,7 @@
  *
  * Walks the subtree of clks starting with clk and recalculates rates as it
  * goes.  Note that if a clk does not implement the .recalc_rate callback then
- * it is assumed that the clock will take on the rate of it's parent.
+ * it is assumed that the clock will take on the rate of its parent.
  *
  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
  * if necessary.
@@ -1014,6 +1080,115 @@
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+{
+	u8 i;
+
+	if (!clk->parents)
+		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+								GFP_KERNEL);
+
+	/*
+	 * find index of new parent clock using cached parent ptrs,
+	 * or if not yet cached, use string name comparison and cache
+	 * them now to avoid future calls to __clk_lookup.
+	 */
+	for (i = 0; i < clk->num_parents; i++) {
+		if (clk->parents && clk->parents[i] == parent)
+			break;
+		else if (!strcmp(clk->parent_names[i], parent->name)) {
+			if (clk->parents)
+				clk->parents[i] = __clk_lookup(parent->name);
+			break;
+		}
+	}
+
+	return i;
+}
+
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+	hlist_del(&clk->child_node);
+
+	if (new_parent) {
+		/* avoid duplicate POST_RATE_CHANGE notifications */
+		if (new_parent->new_child == clk)
+			new_parent->new_child = NULL;
+
+		hlist_add_head(&clk->child_node, &new_parent->children);
+	} else {
+		hlist_add_head(&clk->child_node, &clk_orphan_list);
+	}
+
+	clk->parent = new_parent;
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct clk *old_parent = clk->parent;
+
+	/*
+	 * Migrate prepare state between parents and prevent race with
+	 * clk_enable().
+	 *
+	 * If the clock is not prepared, then a race with
+	 * clk_enable/disable() is impossible since we already have the
+	 * prepare lock (future calls to clk_enable() need to be preceded by
+	 * a clk_prepare()).
+	 *
+	 * If the clock is prepared, migrate the prepared state to the new
+	 * parent and also protect against a race with clk_enable() by
+	 * forcing the clock and the new parent on.  This ensures that all
+	 * future calls to clk_enable() are practically NOPs with respect to
+	 * hardware and software states.
+	 *
+	 * See also: Comment for clk_set_parent() below.
+	 */
+	if (clk->prepare_count) {
+		__clk_prepare(parent);
+		clk_enable(parent);
+		clk_enable(clk);
+	}
+
+	/* update the clk tree topology */
+	flags = clk_enable_lock();
+	clk_reparent(clk, parent);
+	clk_enable_unlock(flags);
+
+	/* change clock input source */
+	if (parent && clk->ops->set_parent)
+		ret = clk->ops->set_parent(clk->hw, p_index);
+
+	if (ret) {
+		flags = clk_enable_lock();
+		clk_reparent(clk, old_parent);
+		clk_enable_unlock(flags);
+
+		if (clk->prepare_count) {
+			clk_disable(clk);
+			clk_disable(parent);
+			__clk_unprepare(parent);
+		}
+		return ret;
+	}
+
+	/*
+	 * Finish the migration of prepare state and undo the changes done
+	 * for preventing a race with clk_enable().
+	 */
+	if (clk->prepare_count) {
+		clk_disable(clk);
+		clk_disable(old_parent);
+		__clk_unprepare(old_parent);
+	}
+
+	/* update debugfs with new clk tree topology */
+	clk_debug_reparent(clk, parent);
+	return 0;
+}
+
 /**
  * __clk_speculate_rates
  * @clk: first clk in the subtree
@@ -1026,7 +1201,7 @@
  * pre-rate change notifications and returns early if no clks in the
  * subtree have subscribed to the notifications.  Note that if a clk does not
  * implement the .recalc_rate callback then it is assumed that the clock will
- * take on the rate of it's parent.
+ * take on the rate of its parent.
  *
  * Caller must hold prepare_lock.
  */
@@ -1058,18 +1233,25 @@
 	return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
+static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
+			     struct clk *new_parent, u8 p_index)
 {
 	struct clk *child;
 
 	clk->new_rate = new_rate;
+	clk->new_parent = new_parent;
+	clk->new_parent_index = p_index;
+	/* include clk in new parent's PRE_RATE_CHANGE notifications */
+	clk->new_child = NULL;
+	if (new_parent && new_parent != clk->parent)
+		new_parent->new_child = clk;
 
 	hlist_for_each_entry(child, &clk->children, child_node) {
 		if (child->ops->recalc_rate)
 			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
 		else
 			child->new_rate = new_rate;
-		clk_calc_subtree(child, child->new_rate);
+		clk_calc_subtree(child, child->new_rate, NULL, 0);
 	}
 }
 
@@ -1080,50 +1262,63 @@
 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
 	struct clk *top = clk;
+	struct clk *old_parent, *parent;
 	unsigned long best_parent_rate = 0;
 	unsigned long new_rate;
+	u8 p_index = 0;
 
 	/* sanity */
 	if (IS_ERR_OR_NULL(clk))
 		return NULL;
 
 	/* save parent rate, if it exists */
-	if (clk->parent)
-		best_parent_rate = clk->parent->rate;
+	parent = old_parent = clk->parent;
+	if (parent)
+		best_parent_rate = parent->rate;
 
-	/* never propagate up to the parent */
-	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
-		if (!clk->ops->round_rate) {
-			clk->new_rate = clk->rate;
-			return NULL;
-		}
-		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+	/* find the closest rate and parent clk/rate */
+	if (clk->ops->determine_rate) {
+		new_rate = clk->ops->determine_rate(clk->hw, rate,
+						    &best_parent_rate,
+						    &parent);
+	} else if (clk->ops->round_rate) {
+		new_rate = clk->ops->round_rate(clk->hw, rate,
+						&best_parent_rate);
+	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
+		/* pass-through clock without adjustable parent */
+		clk->new_rate = clk->rate;
+		return NULL;
+	} else {
+		/* pass-through clock with adjustable parent */
+		top = clk_calc_new_rates(parent, rate);
+		new_rate = parent->new_rate;
 		goto out;
 	}
 
-	/* need clk->parent from here on out */
-	if (!clk->parent) {
-		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
+	/* some clocks must be gated to change parent */
+	if (parent != old_parent &&
+	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+		pr_debug("%s: %s not gated but wants to reparent\n",
+			 __func__, clk->name);
 		return NULL;
 	}
 
-	if (!clk->ops->round_rate) {
-		top = clk_calc_new_rates(clk->parent, rate);
-		new_rate = clk->parent->new_rate;
-
-		goto out;
+	/* try finding the new parent index */
+	if (parent) {
+		p_index = clk_fetch_parent_index(clk, parent);
+		if (p_index == clk->num_parents) {
+			pr_debug("%s: clk %s can not be parent of clk %s\n",
+				 __func__, parent->name, clk->name);
+			return NULL;
+		}
 	}
 
-	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-
-	if (best_parent_rate != clk->parent->rate) {
-		top = clk_calc_new_rates(clk->parent, best_parent_rate);
-
-		goto out;
-	}
+	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
+	    best_parent_rate != parent->rate)
+		top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
-	clk_calc_subtree(clk, new_rate);
+	clk_calc_subtree(clk, new_rate, parent, p_index);
 
 	return top;
 }
@@ -1135,7 +1330,7 @@
  */
 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 {
-	struct clk *child, *fail_clk = NULL;
+	struct clk *child, *tmp_clk, *fail_clk = NULL;
 	int ret = NOTIFY_DONE;
 
 	if (clk->rate == clk->new_rate)
@@ -1148,9 +1343,19 @@
 	}
 
 	hlist_for_each_entry(child, &clk->children, child_node) {
-		clk = clk_propagate_rate_change(child, event);
-		if (clk)
-			fail_clk = clk;
+		/* Skip children who will be reparented to another clock */
+		if (child->new_parent && child->new_parent != clk)
+			continue;
+		tmp_clk = clk_propagate_rate_change(child, event);
+		if (tmp_clk)
+			fail_clk = tmp_clk;
+	}
+
+	/* handle the new child who might not be in clk->children yet */
+	if (clk->new_child) {
+		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
+		if (tmp_clk)
+			fail_clk = tmp_clk;
 	}
 
 	return fail_clk;
@@ -1168,6 +1373,10 @@
 
 	old_rate = clk->rate;
 
+	/* set parent */
+	if (clk->new_parent && clk->new_parent != clk->parent)
+		__clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
+
 	if (clk->parent)
 		best_parent_rate = clk->parent->rate;
 
@@ -1182,8 +1391,16 @@
 	if (clk->notifier_count && old_rate != clk->rate)
 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-	hlist_for_each_entry(child, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node) {
+		/* Skip children who will be reparented to another clock */
+		if (child->new_parent && child->new_parent != clk)
+			continue;
 		clk_change_rate(child);
+	}
+
+	/* handle the new child who might not be in clk->children yet */
+	if (clk->new_child)
+		clk_change_rate(clk->new_child);
 }
 
 /**
@@ -1198,7 +1415,7 @@
  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
  * after calling .round_rate then upstream parent propagation is ignored.  If
  * *parent_rate comes back with a new rate for clk's parent then we propagate
- * up to clk's parent and set it's rate.  Upward propagation will continue
+ * up to clk's parent and set its rate.  Upward propagation will continue
  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
  * .round_rate stops requesting changes to clk's parent_rate.
  *
@@ -1212,6 +1429,9 @@
 	struct clk *top, *fail_clk;
 	int ret = 0;
 
+	if (!clk)
+		return 0;
+
 	/* prevent racing with updates to the clock topology */
 	clk_prepare_lock();
 
@@ -1315,30 +1535,12 @@
 			kzalloc((sizeof(struct clk*) * clk->num_parents),
 					GFP_KERNEL);
 
-	if (!clk->parents)
-		ret = __clk_lookup(clk->parent_names[index]);
-	else if (!clk->parents[index])
-		ret = clk->parents[index] =
-			__clk_lookup(clk->parent_names[index]);
-	else
-		ret = clk->parents[index];
+	ret = clk_get_parent_by_index(clk, index);
 
 out:
 	return ret;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
-{
-	hlist_del(&clk->child_node);
-
-	if (new_parent)
-		hlist_add_head(&clk->child_node, &new_parent->children);
-	else
-		hlist_add_head(&clk->child_node, &clk_orphan_list);
-
-	clk->parent = new_parent;
-}
-
 void __clk_reparent(struct clk *clk, struct clk *new_parent)
 {
 	clk_reparent(clk, new_parent);
@@ -1346,98 +1548,6 @@
 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
 
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
-{
-	u8 i;
-
-	if (!clk->parents)
-		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
-								GFP_KERNEL);
-
-	/*
-	 * find index of new parent clock using cached parent ptrs,
-	 * or if not yet cached, use string name comparison and cache
-	 * them now to avoid future calls to __clk_lookup.
-	 */
-	for (i = 0; i < clk->num_parents; i++) {
-		if (clk->parents && clk->parents[i] == parent)
-			break;
-		else if (!strcmp(clk->parent_names[i], parent->name)) {
-			if (clk->parents)
-				clk->parents[i] = __clk_lookup(parent->name);
-			break;
-		}
-	}
-
-	return i;
-}
-
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
-{
-	unsigned long flags;
-	int ret = 0;
-	struct clk *old_parent = clk->parent;
-
-	/*
-	 * Migrate prepare state between parents and prevent race with
-	 * clk_enable().
-	 *
-	 * If the clock is not prepared, then a race with
-	 * clk_enable/disable() is impossible since we already have the
-	 * prepare lock (future calls to clk_enable() need to be preceded by
-	 * a clk_prepare()).
-	 *
-	 * If the clock is prepared, migrate the prepared state to the new
-	 * parent and also protect against a race with clk_enable() by
-	 * forcing the clock and the new parent on.  This ensures that all
-	 * future calls to clk_enable() are practically NOPs with respect to
-	 * hardware and software states.
-	 *
-	 * See also: Comment for clk_set_parent() below.
-	 */
-	if (clk->prepare_count) {
-		__clk_prepare(parent);
-		clk_enable(parent);
-		clk_enable(clk);
-	}
-
-	/* update the clk tree topology */
-	flags = clk_enable_lock();
-	clk_reparent(clk, parent);
-	clk_enable_unlock(flags);
-
-	/* change clock input source */
-	if (parent && clk->ops->set_parent)
-		ret = clk->ops->set_parent(clk->hw, p_index);
-
-	if (ret) {
-		flags = clk_enable_lock();
-		clk_reparent(clk, old_parent);
-		clk_enable_unlock(flags);
-
-		if (clk->prepare_count) {
-			clk_disable(clk);
-			clk_disable(parent);
-			__clk_unprepare(parent);
-		}
-		return ret;
-	}
-
-	/*
-	 * Finish the migration of prepare state and undo the changes done
-	 * for preventing a race with clk_enable().
-	 */
-	if (clk->prepare_count) {
-		clk_disable(clk);
-		clk_disable(old_parent);
-		__clk_unprepare(old_parent);
-	}
-
-	/* update debugfs with new clk tree topology */
-	clk_debug_reparent(clk, parent);
-	return 0;
-}
-
 /**
  * clk_set_parent - switch the parent of a mux clk
  * @clk: the mux clk whose input we are switching
@@ -1461,7 +1571,10 @@
 	u8 p_index = 0;
 	unsigned long p_rate = 0;
 
-	if (!clk || !clk->ops)
+	if (!clk)
+		return 0;
+
+	if (!clk->ops)
 		return -EINVAL;
 
 	/* verify ops for multi-parent clks */
@@ -1544,8 +1657,9 @@
 
 	/* check that clk_ops are sane.  See Documentation/clk.txt */
 	if (clk->ops->set_rate &&
-			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
-		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
+	      clk->ops->recalc_rate)) {
+		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
 				__func__, clk->name);
 		ret = -EINVAL;
 		goto out;
@@ -1628,7 +1742,7 @@
 	 * this clock
 	 */
 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
-		if (orphan->ops->get_parent) {
+		if (orphan->num_parents && orphan->ops->get_parent) {
 			i = orphan->ops->get_parent(orphan->hw);
 			if (!strcmp(clk->name, orphan->parent_names[i]))
 				__clk_reparent(orphan, clk);
@@ -1648,7 +1762,7 @@
 	 * The .init callback is not used by any of the basic clock types, but
 	 * exists for weird hardware that must perform initialization magic.
 	 * Please consider other ways of solving initialization problems before
-	 * using this callback, as it's use is discouraged.
+	 * using this callback, as its use is discouraged.
 	 */
 	if (clk->ops->init)
 		clk->ops->init(clk->hw);
@@ -1675,7 +1789,7 @@
  * very large numbers of clocks that need to be statically initialized.  It is
  * a layering violation to include clk-private.h from any code which implements
  * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.  Returns 0
+ * separate C file from the logic that implements its operations.  Returns 0
  * on success, otherwise an error code.
  */
 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
@@ -2115,13 +2229,13 @@
  */
 void __init of_clk_init(const struct of_device_id *matches)
 {
+	const struct of_device_id *match;
 	struct device_node *np;
 
 	if (!matches)
 		matches = __clk_of_table;
 
-	for_each_matching_node(np, matches) {
-		const struct of_device_id *match = of_match_node(matches, np);
+	for_each_matching_node_and_match(np, matches, &match) {
 		of_clk_init_cb_t clk_init_cb = match->data;
 		clk_init_cb(np);
 	}
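[Note: a provider that can choose between parents now implements .determine_rate instead of .round_rate: it returns the achievable rate and reports the preferred parent and parent rate through the out-parameters. In the simplest case a driver can defer to the generic helper, which is exactly what clk_mux_ops does above:

	/* hypothetical provider callback deferring to the generic helper */
	static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *best_parent_rate,
				       struct clk **best_parent_clk)
	{
		return __clk_mux_determine_rate(hw, rate, best_parent_rate,
						best_parent_clk);
	}

clk_calc_new_rates() consults .determine_rate first and falls back to .round_rate, so existing drivers keep working unchanged.]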
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index d1f1a19..b2721ca 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -248,7 +248,8 @@
 	clk_register_clkdev(clk, NULL, "mmp2-pwm.3");
 
 	clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, vctcxo);
 	clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -258,7 +259,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
 
 	clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, vctcxo);
 	clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -268,7 +270,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
 
 	clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART2, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, vctcxo);
 	clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -278,7 +281,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
 
 	clk = clk_register_mux(NULL, "uart3_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART3, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, vctcxo);
 	clk_register_clkdev(clk, "uart_mux.3", NULL);
@@ -288,7 +292,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.3");
 
 	clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "uart_mux.0", NULL);
 
@@ -297,7 +302,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.0");
 
 	clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.1", NULL);
 
@@ -306,7 +312,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.1");
 
 	clk = clk_register_mux(NULL, "ssp2_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP2, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.2", NULL);
 
@@ -315,7 +322,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.2");
 
 	clk = clk_register_mux(NULL, "ssp3_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP3, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.3", NULL);
 
@@ -324,7 +332,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.3");
 
 	clk = clk_register_mux(NULL, "sdh_mux", sdh_parent,
-				ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(sdh_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_SDH0, 8, 2, 0, &clk_lock);
 	clk_register_clkdev(clk, "sdh_mux", NULL);
 
@@ -354,7 +363,8 @@
 	clk_register_clkdev(clk, "usb_clk", NULL);
 
 	clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
-				ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(disp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_DISP0, 6, 2, 0, &clk_lock);
 	clk_register_clkdev(clk, "disp_mux.0", NULL);
 
@@ -376,7 +386,8 @@
 	clk_register_clkdev(clk, "disp_sphy.0", NULL);
 
 	clk = clk_register_mux(NULL, "disp1_mux", disp_parent,
-				ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(disp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_DISP1, 6, 2, 0, &clk_lock);
 	clk_register_clkdev(clk, "disp_mux.1", NULL);
 
@@ -394,7 +405,8 @@
 	clk_register_clkdev(clk, "ccic_arbiter", NULL);
 
 	clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
-				ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ccic_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_CCIC0, 6, 2, 0, &clk_lock);
 	clk_register_clkdev(clk, "ccic_mux.0", NULL);
 
@@ -421,7 +433,8 @@
 	clk_register_clkdev(clk, "sphyclk", "mmp-ccic.0");
 
 	clk = clk_register_mux(NULL, "ccic1_mux", ccic_parent,
-				ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ccic_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_CCIC1, 6, 2, 0, &clk_lock);
 	clk_register_clkdev(clk, "ccic_mux.1", NULL);
 
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 28b3b51..014396b 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -199,7 +199,8 @@
 	clk_register_clkdev(clk, NULL, "pxa168-pwm.3");
 
 	clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, uart_pll);
 	clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -209,7 +210,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
 
 	clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, uart_pll);
 	clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -219,7 +221,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
 
 	clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART2, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, uart_pll);
 	clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -229,7 +232,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
 
 	clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "uart_mux.0", NULL);
 
@@ -238,7 +242,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.0");
 
 	clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.1", NULL);
 
@@ -247,7 +252,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.1");
 
 	clk = clk_register_mux(NULL, "ssp2_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP2, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.2", NULL);
 
@@ -256,7 +262,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.2");
 
 	clk = clk_register_mux(NULL, "ssp3_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP3, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.3", NULL);
 
@@ -265,7 +272,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.3");
 
 	clk = clk_register_mux(NULL, "ssp4_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP4, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.4", NULL);
 
@@ -278,7 +286,8 @@
 	clk_register_clkdev(clk, NULL, "pxa3xx-nand.0");
 
 	clk = clk_register_mux(NULL, "sdh0_mux", sdh_parent,
-				ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(sdh_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_SDH0, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "sdh0_mux", NULL);
 
@@ -287,7 +296,8 @@
 	clk_register_clkdev(clk, NULL, "sdhci-pxa.0");
 
 	clk = clk_register_mux(NULL, "sdh1_mux", sdh_parent,
-				ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(sdh_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_SDH1, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "sdh1_mux", NULL);
 
@@ -304,7 +314,8 @@
 	clk_register_clkdev(clk, "sph_clk", NULL);
 
 	clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
-				ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(disp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_DISP0, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "disp_mux.0", NULL);
 
@@ -317,7 +328,8 @@
 	clk_register_clkdev(clk, "hclk", "mmp-disp.0");
 
 	clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
-				ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ccic_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_CCIC0, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "ccic_mux.0", NULL);
 
@@ -327,8 +339,8 @@
 
 	clk = clk_register_mux(NULL, "ccic0_phy_mux", ccic_phy_parent,
 				ARRAY_SIZE(ccic_phy_parent),
-				CLK_SET_RATE_PARENT, apmu_base + APMU_CCIC0,
-				7, 1, 0, &clk_lock);
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+				apmu_base + APMU_CCIC0, 7, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "ccic_phy_mux.0", NULL);
 
 	clk = mmp_clk_register_apmu("ccic0_phy", "ccic0_phy_mux",
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 6ec0569..9efc6a4 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -204,7 +204,8 @@
 	clk_register_clkdev(clk, NULL, "pxa910-pwm.3");
 
 	clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, uart_pll);
 	clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -214,7 +215,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
 
 	clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, uart_pll);
 	clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -224,7 +226,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
 
 	clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
-				ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(uart_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbcp_base + APBCP_UART2, 4, 3, 0, &clk_lock);
 	clk_set_parent(clk, uart_pll);
 	clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -234,7 +237,8 @@
 	clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
 
 	clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "uart_mux.0", NULL);
 
@@ -243,7 +247,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-ssp.0");
 
 	clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
-				ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ssp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
 	clk_register_clkdev(clk, "ssp_mux.1", NULL);
 
@@ -256,7 +261,8 @@
 	clk_register_clkdev(clk, NULL, "pxa3xx-nand.0");
 
 	clk = clk_register_mux(NULL, "sdh0_mux", sdh_parent,
-				ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(sdh_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_SDH0, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "sdh0_mux", NULL);
 
@@ -265,7 +271,8 @@
 	clk_register_clkdev(clk, NULL, "sdhci-pxa.0");
 
 	clk = clk_register_mux(NULL, "sdh1_mux", sdh_parent,
-				ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(sdh_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_SDH1, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "sdh1_mux", NULL);
 
@@ -282,7 +289,8 @@
 	clk_register_clkdev(clk, "sph_clk", NULL);
 
 	clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
-				ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(disp_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_DISP0, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "disp_mux.0", NULL);
 
@@ -291,7 +299,8 @@
 	clk_register_clkdev(clk, NULL, "mmp-disp.0");
 
 	clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
-				ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+				ARRAY_SIZE(ccic_parent),
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 				apmu_base + APMU_CCIC0, 6, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "ccic_mux.0", NULL);
 
@@ -301,8 +310,8 @@
 
 	clk = clk_register_mux(NULL, "ccic0_phy_mux", ccic_phy_parent,
 				ARRAY_SIZE(ccic_phy_parent),
-				CLK_SET_RATE_PARENT, apmu_base + APMU_CCIC0,
-				7, 1, 0, &clk_lock);
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+				apmu_base + APMU_CCIC0, 7, 1, 0, &clk_lock);
 	clk_register_clkdev(clk, "ccic_phy_mux.0", NULL);
 
 	clk = mmp_clk_register_apmu("ccic0_phy", "ccic0_phy_mux",
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 079960e..fc777bd 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -32,13 +32,13 @@
 
 enum { A370_CPU_TO_NBCLK, A370_CPU_TO_HCLK, A370_CPU_TO_DRAMCLK };
 
-static const struct coreclk_ratio __initconst a370_coreclk_ratios[] = {
+static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
 	{ .id = A370_CPU_TO_NBCLK, .name = "nbclk" },
 	{ .id = A370_CPU_TO_HCLK, .name = "hclk" },
 	{ .id = A370_CPU_TO_DRAMCLK, .name = "dramclk" },
 };
 
-static const u32 __initconst a370_tclk_freqs[] = {
+static const u32 a370_tclk_freqs[] __initconst = {
 	16600000,
 	20000000,
 };
@@ -52,7 +52,7 @@
 	return a370_tclk_freqs[tclk_freq_select];
 }
 
-static const u32 __initconst a370_cpu_freqs[] = {
+static const u32 a370_cpu_freqs[] __initconst = {
 	400000000,
 	533000000,
 	667000000,
@@ -78,7 +78,7 @@
 	return cpu_freq;
 }
 
-static const int __initconst a370_nbclk_ratios[32][2] = {
+static const int a370_nbclk_ratios[32][2] __initconst = {
 	{0, 1}, {1, 2}, {2, 2}, {2, 2},
 	{1, 2}, {1, 2}, {1, 1}, {2, 3},
 	{0, 1}, {1, 2}, {2, 4}, {0, 1},
@@ -89,7 +89,7 @@
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 };
 
-static const int __initconst a370_hclk_ratios[32][2] = {
+static const int a370_hclk_ratios[32][2] __initconst = {
 	{0, 1}, {1, 2}, {2, 6}, {2, 3},
 	{1, 3}, {1, 4}, {1, 2}, {2, 6},
 	{0, 1}, {1, 6}, {2, 10}, {0, 1},
@@ -100,7 +100,7 @@
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 };
 
-static const int __initconst a370_dramclk_ratios[32][2] = {
+static const int a370_dramclk_ratios[32][2] __initconst = {
 	{0, 1}, {1, 2}, {2, 3}, {2, 3},
 	{1, 3}, {1, 2}, {1, 2}, {2, 6},
 	{0, 1}, {1, 3}, {2, 5}, {0, 1},
@@ -152,7 +152,7 @@
  * Clock Gating Control
  */
 
-static const struct clk_gating_soc_desc __initconst a370_gating_desc[] = {
+static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
 	{ "audio", NULL, 0, 0 },
 	{ "pex0_en", NULL, 1, 0 },
 	{ "pex1_en", NULL,  2, 0 },
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 13b62ce..9922c44 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -40,7 +40,7 @@
 
 enum { AXP_CPU_TO_NBCLK, AXP_CPU_TO_HCLK, AXP_CPU_TO_DRAMCLK };
 
-static const struct coreclk_ratio __initconst axp_coreclk_ratios[] = {
+static const struct coreclk_ratio axp_coreclk_ratios[] __initconst = {
 	{ .id = AXP_CPU_TO_NBCLK, .name = "nbclk" },
 	{ .id = AXP_CPU_TO_HCLK, .name = "hclk" },
 	{ .id = AXP_CPU_TO_DRAMCLK, .name = "dramclk" },
@@ -52,7 +52,7 @@
 	return 250000000;
 }
 
-static const u32 __initconst axp_cpu_freqs[] = {
+static const u32 axp_cpu_freqs[] __initconst = {
 	1000000000,
 	1066000000,
 	1200000000,
@@ -89,7 +89,7 @@
 	return cpu_freq;
 }
 
-static const int __initconst axp_nbclk_ratios[32][2] = {
+static const int axp_nbclk_ratios[32][2] __initconst = {
 	{0, 1}, {1, 2}, {2, 2}, {2, 2},
 	{1, 2}, {1, 2}, {1, 1}, {2, 3},
 	{0, 1}, {1, 2}, {2, 4}, {0, 1},
@@ -100,7 +100,7 @@
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 };
 
-static const int __initconst axp_hclk_ratios[32][2] = {
+static const int axp_hclk_ratios[32][2] __initconst = {
 	{0, 1}, {1, 2}, {2, 6}, {2, 3},
 	{1, 3}, {1, 4}, {1, 2}, {2, 6},
 	{0, 1}, {1, 6}, {2, 10}, {0, 1},
@@ -111,7 +111,7 @@
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 };
 
-static const int __initconst axp_dramclk_ratios[32][2] = {
+static const int axp_dramclk_ratios[32][2] __initconst = {
 	{0, 1}, {1, 2}, {2, 3}, {2, 3},
 	{1, 3}, {1, 2}, {1, 2}, {2, 6},
 	{0, 1}, {1, 3}, {2, 5}, {0, 1},
@@ -169,7 +169,7 @@
  * Clock Gating Control
  */
 
-static const struct clk_gating_soc_desc __initconst axp_gating_desc[] = {
+static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
 	{ "audio", NULL, 0, 0 },
 	{ "ge3", NULL, 1, 0 },
 	{ "ge2", NULL,  2, 0 },
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index b0fbc07..1466865 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -119,7 +119,7 @@
 
 	cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
 	if (WARN_ON(!cpuclk))
-		return;
+		goto cpuclk_out;
 
 	clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
 	if (WARN_ON(!clks))
@@ -170,6 +170,8 @@
 		kfree(cpuclk[ncpus].clk_name);
 clks_out:
 	kfree(cpuclk);
+cpuclk_out:
+	iounmap(clock_complex_base);
 }
 
 CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index adaa4a1..25ceccf 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -45,8 +45,10 @@
 	clk_data.clk_num = 2 + desc->num_ratios;
 	clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
 				GFP_KERNEL);
-	if (WARN_ON(!clk_data.clks))
+	if (WARN_ON(!clk_data.clks)) {
+		iounmap(base);
 		return;
+	}
 
 	/* Register TCLK */
 	of_property_read_string_index(np, "clock-output-names", 0,
@@ -134,7 +136,7 @@
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (WARN_ON(!ctrl))
-		return;
+		goto ctrl_out;
 
 	spin_lock_init(&ctrl->lock);
 
@@ -145,10 +147,8 @@
 	ctrl->num_gates = n;
 	ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
 			      GFP_KERNEL);
-	if (WARN_ON(!ctrl->gates)) {
-		kfree(ctrl);
-		return;
-	}
+	if (WARN_ON(!ctrl->gates))
+		goto gates_out;
 
 	for (n = 0; n < ctrl->num_gates; n++) {
 		const char *parent =
@@ -160,4 +160,10 @@
 	}
 
 	of_clk_add_provider(np, clk_gating_get_src, ctrl);
+
+	return;
+gates_out:
+	kfree(ctrl);
+ctrl_out:
+	iounmap(base);
 }
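
Both mvebu hunks above plug the same leak: every early exit taken after
of_iomap() must iounmap() the registers, so the error paths are restructured
into the usual goto-unwind ladder. A condensed sketch of the shape, with
hypothetical names throughout:

	#include <linux/clk.h>
	#include <linux/io.h>
	#include <linux/of_address.h>
	#include <linux/slab.h>

	struct example_ctrl {
		struct clk **gates;
	};

	static void __init example_clk_setup(struct device_node *np)
	{
		void __iomem *base = of_iomap(np, 0);
		struct example_ctrl *ctrl;

		if (WARN_ON(!base))
			return;

		ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
		if (WARN_ON(!ctrl))
			goto ctrl_out;

		ctrl->gates = kzalloc(8 * sizeof(*ctrl->gates), GFP_KERNEL);
		if (WARN_ON(!ctrl->gates))
			goto gates_out;

		/* success: resources stay live for the clock providers */
		return;

	gates_out:
		kfree(ctrl);
	ctrl_out:
		iounmap(base);	/* undo of_iomap() on every failure path */
	}

The labels unwind in reverse order of acquisition, so each new allocation only
needs one new label rather than duplicated cleanup at every exit.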
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 79d7aed..38aee1e 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -74,12 +74,12 @@
 
 enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
 
-static const struct coreclk_ratio __initconst dove_coreclk_ratios[] = {
+static const struct coreclk_ratio dove_coreclk_ratios[] __initconst = {
 	{ .id = DOVE_CPU_TO_L2, .name = "l2clk", },
 	{ .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
 };
 
-static const u32 __initconst dove_tclk_freqs[] = {
+static const u32 dove_tclk_freqs[] __initconst = {
 	166666667,
 	125000000,
 	0, 0
@@ -92,7 +92,7 @@
 	return dove_tclk_freqs[opt];
 }
 
-static const u32 __initconst dove_cpu_freqs[] = {
+static const u32 dove_cpu_freqs[] __initconst = {
 	0, 0, 0, 0, 0,
 	1000000000,
 	933333333, 933333333,
@@ -111,12 +111,12 @@
 	return dove_cpu_freqs[opt];
 }
 
-static const int __initconst dove_cpu_l2_ratios[8][2] = {
+static const int dove_cpu_l2_ratios[8][2] __initconst = {
 	{ 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
 	{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
 };
 
-static const int __initconst dove_cpu_ddr_ratios[16][2] = {
+static const int dove_cpu_ddr_ratios[16][2] __initconst = {
 	{ 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
 	{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
 	{ 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
@@ -164,7 +164,7 @@
  * Clock Gating Control
  */
 
-static const struct clk_gating_soc_desc __initconst dove_gating_desc[] = {
+static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
 	{ "usb0", NULL, 0, 0 },
 	{ "usb1", NULL, 1, 0 },
 	{ "ge",	"gephy", 2, 0 },
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 71d2461..2636a55 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -78,7 +78,7 @@
 
 enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
 
-static const struct coreclk_ratio __initconst kirkwood_coreclk_ratios[] = {
+static const struct coreclk_ratio kirkwood_coreclk_ratios[] __initconst = {
 	{ .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
 	{ .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
 };
@@ -90,7 +90,7 @@
 	return (opt) ? 166666667 : 200000000;
 }
 
-static const u32 __initconst kirkwood_cpu_freqs[] = {
+static const u32 kirkwood_cpu_freqs[] __initconst = {
 	0, 0, 0, 0,
 	600000000,
 	0,
@@ -111,12 +111,12 @@
 	return kirkwood_cpu_freqs[opt];
 }
 
-static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
+static const int kirkwood_cpu_l2_ratios[8][2] __initconst = {
 	{ 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
 	{ 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
 };
 
-static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
+static const int kirkwood_cpu_ddr_ratios[16][2] __initconst = {
 	{ 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
 	{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
 	{ 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
@@ -145,7 +145,7 @@
 	}
 }
 
-static const u32 __initconst mv88f6180_cpu_freqs[] = {
+static const u32 mv88f6180_cpu_freqs[] __initconst = {
 	0, 0, 0, 0, 0,
 	600000000,
 	800000000,
@@ -158,7 +158,7 @@
 	return mv88f6180_cpu_freqs[opt];
 }
 
-static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
+static const int mv88f6180_cpu_ddr_ratios[8][2] __initconst = {
 	{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
 	{ 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
 };
@@ -219,7 +219,7 @@
  * Clock Gating Control
  */
 
-static const struct clk_gating_soc_desc __initconst kirkwood_gating_desc[] = {
+static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
 	{ "ge0", NULL, 0, 0 },
 	{ "pex0", NULL, 2, 0 },
 	{ "usb0", NULL, 3, 0 },
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index f6a7487..c396fe3 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk/mxs.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/init.h>
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index 81421e2..ef10ad9 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -52,8 +52,8 @@
 		u8 shift, u8 width, const char **parent_names, int num_parents)
 {
 	return clk_register_mux(NULL, name, parent_names, num_parents,
-				CLK_SET_RATE_PARENT, reg, shift, width,
-				0, &mxs_lock);
+				CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+				reg, shift, width, 0, &mxs_lock);
 }
 
 static inline struct clk *mxs_clk_fixed_factor(const char *name,
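
Because every mxs mux is created through the single mxs_clk_mux() wrapper
above, adding CLK_SET_RATE_NO_REPARENT there covers the whole platform at once.
A hypothetical caller, with illustrative names:

	clk = mxs_clk_mux("example_sel", base + EXAMPLE_CLKSEQ, 5, 1,
			  example_parents, ARRAY_SIZE(example_parents));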
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 5d4d432..3413380 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,3 +8,6 @@
 obj-$(CONFIG_SOC_EXYNOS5420)	+= clk-exynos5420.o
 obj-$(CONFIG_SOC_EXYNOS5440)	+= clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)	+= clk-exynos-audss.o
+ifdef CONFIG_COMMON_CLK
+obj-$(CONFIG_ARCH_S3C64XX)	+= clk-s3c64xx.o
+endif
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 9b1bbd5..39b40aa 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -62,7 +62,7 @@
 #endif /* CONFIG_PM_SLEEP */
 
 /* register exynos_audss clocks */
-void __init exynos_audss_clk_init(struct device_node *np)
+static void __init exynos_audss_clk_init(struct device_node *np)
 {
 	reg_base = of_iomap(np, 0);
 	if (!reg_base) {
@@ -82,11 +82,13 @@
 	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 
 	clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
-				mout_audss_p, ARRAY_SIZE(mout_audss_p), 0,
+				mout_audss_p, ARRAY_SIZE(mout_audss_p),
+				CLK_SET_RATE_NO_REPARENT,
 				reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
 
 	clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
-				mout_i2s_p, ARRAY_SIZE(mout_i2s_p), 0,
+				mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
+				CLK_SET_RATE_NO_REPARENT,
 				reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
 
 	clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4e57397..ad5ff50 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -17,7 +17,6 @@
 #include <linux/of_address.h>
 
 #include "clk.h"
-#include "clk-pll.h"
 
 /* Exynos4 clock controller register offsets */
 #define SRC_LEFTBUS		0x4200
@@ -97,12 +96,15 @@
 #define GATE_IP_PERIL		0xc950
 #define E4210_GATE_IP_PERIR	0xc960
 #define GATE_BLOCK		0xc970
+#define E4X12_MPLL_LOCK		0x10008
 #define E4X12_MPLL_CON0		0x10108
 #define SRC_DMC			0x10200
 #define SRC_MASK_DMC		0x10300
 #define DIV_DMC0		0x10500
 #define DIV_DMC1		0x10504
 #define GATE_IP_DMC		0x10900
+#define APLL_LOCK		0x14000
+#define E4210_MPLL_LOCK		0x14008
 #define APLL_CON0		0x14100
 #define E4210_MPLL_CON0		0x14108
 #define SRC_CPU			0x14200
@@ -121,6 +123,12 @@
 	EXYNOS4X12,
 };
 
+/* list of PLLs to be registered */
+enum exynos4_plls {
+	apll, mpll, epll, vpll,
+	nr_plls			/* number of PLLs */
+};
+
 /*
  * Let each supported clock get a unique id. This id is used to lookup the clock
  * for device tree based platforms. The clocks are categorized into three
@@ -169,7 +177,7 @@
 	gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp,
 	mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp,
 	asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk,
-	spi1_isp_sclk, uart_isp_sclk,
+	spi1_isp_sclk, uart_isp_sclk, tmu_apbif,
 
 	/* mux clocks */
 	mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0,
@@ -187,7 +195,7 @@
  * list of controller registers to be saved and restored during a
  * suspend/resume cycle.
  */
-static __initdata unsigned long exynos4210_clk_save[] = {
+static unsigned long exynos4210_clk_save[] __initdata = {
 	E4210_SRC_IMAGE,
 	E4210_SRC_LCD1,
 	E4210_SRC_MASK_LCD1,
@@ -198,7 +206,7 @@
 	E4210_MPLL_CON0,
 };
 
-static __initdata unsigned long exynos4x12_clk_save[] = {
+static unsigned long exynos4x12_clk_save[] __initdata = {
 	E4X12_GATE_IP_IMAGE,
 	E4X12_GATE_IP_PERIR,
 	E4X12_SRC_CAM1,
@@ -207,7 +215,7 @@
 	E4X12_MPLL_CON0,
 };
 
-static __initdata unsigned long exynos4_clk_regs[] = {
+static unsigned long exynos4_clk_regs[] __initdata = {
 	SRC_LEFTBUS,
 	DIV_LEFTBUS,
 	GATE_IP_LEFTBUS,
@@ -338,24 +346,24 @@
 PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
 
 /* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
 	FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0),
 	FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0),
 };
 
 /* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
 	FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
 	FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
 	FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
 };
 
-struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
 	FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
 };
 
 /* list of mux clocks supported in all exynos4 soc's */
-struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
 	MUX_FA(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
 			CLK_SET_RATE_PARENT, 0, "mout_apll"),
 	MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
@@ -367,17 +375,20 @@
 			CLK_SET_RATE_PARENT, 0),
 	MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
 	MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
-	MUX_A(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1, "sclk_epll"),
+	MUX(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
 	MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
 };
 
 /* list of mux clocks supported in exynos4210 soc */
-struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
+	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+};
+
+static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
 	MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
 	MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
 	MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
 	MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
-	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
 	MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
 	MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
 	MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
@@ -385,11 +396,9 @@
 	MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
 	MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
 	MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
-	MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1, "mout_mpll"),
-	MUX_A(mout_core, "mout_core", mout_core_p4210,
-			SRC_CPU, 16, 1, "moutcore"),
-	MUX_A(sclk_vpll, "sclk_vpll", sclk_vpll_p4210,
-			SRC_TOP0, 8, 1, "sclk_vpll"),
+	MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
+	MUX(mout_core, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+	MUX(sclk_vpll, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
 	MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
 	MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
 	MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
@@ -423,9 +432,9 @@
 };
 
 /* list of mux clocks supported in exynos4x12 soc */
-struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
-	MUX_A(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
-			SRC_CPU, 24, 1, "mout_mpll"),
+static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+	MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
+			SRC_CPU, 24, 1),
 	MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
 	MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
 	MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12,
@@ -445,12 +454,9 @@
 	MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
 	MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
 	MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
-	MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p,
-			SRC_DMC, 12, 1, "sclk_mpll"),
-	MUX_A(sclk_vpll, "sclk_vpll", mout_vpll_p,
-			SRC_TOP0, 8, 1, "sclk_vpll"),
-	MUX_A(mout_core, "mout_core", mout_core_p4x12,
-			SRC_CPU, 16, 1, "moutcore"),
+	MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+	MUX(sclk_vpll, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
+	MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
 	MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
 	MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
 	MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
@@ -491,7 +497,7 @@
 };
 
 /* list of divider clocks supported in all exynos4 soc's */
-struct samsung_div_clock exynos4_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4_div_clks[] __initdata = {
 	DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3),
 	DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3),
 	DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
@@ -538,9 +544,8 @@
 	DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
 	DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
 	DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
-	DIV_A(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3, "armclk"),
-	DIV_A(sclk_apll, "sclk_apll", "mout_apll",
-			DIV_CPU0, 24, 3, "sclk_apll"),
+	DIV(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
+	DIV(sclk_apll, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
 	DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
 			CLK_SET_RATE_PARENT, 0),
 	DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
@@ -554,7 +559,7 @@
 };
 
 /* list of divider clocks supported in exynos4210 soc */
-struct samsung_div_clock exynos4210_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
 	DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
 	DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
 	DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
@@ -565,7 +570,7 @@
 };
 
 /* list of divider clocks supported in exynos4x12 soc */
-struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
 	DIV(none, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
 	DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
 	DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
@@ -594,7 +599,7 @@
 };
 
 /* list of gate clocks supported in all exynos4 soc's */
-struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
 	/*
 	 * After all Exynos4 based platforms are migrated to use device tree,
 	 * the device name and clock alias names specified below for some
@@ -629,164 +634,151 @@
 			CLK_SET_RATE_PARENT, 0),
 	GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
 			CLK_SET_RATE_PARENT, 0),
-	GATE_D(vp, "s5p-mixer", "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
-	GATE_D(mixer, "s5p-mixer", "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
-	GATE_D(hdmi, "exynos4-hdmi", "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
-	GATE_A(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0, "timers"),
-	GATE_A(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0, "biu"),
-	GATE_A(usb_host, "usb_host", "aclk133",
-			GATE_IP_FSYS, 12, 0, 0, "usbhost"),
-	GATE_DA(sclk_fimc0, "exynos4-fimc.0", "sclk_fimc0", "div_fimc0",
-			SRC_MASK_CAM, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
-	GATE_DA(sclk_fimc1, "exynos4-fimc.1", "sclk_fimc1", "div_fimc1",
-			SRC_MASK_CAM, 4, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
-	GATE_DA(sclk_fimc2, "exynos4-fimc.2", "sclk_fimc2", "div_fimc2",
-			SRC_MASK_CAM, 8, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
-	GATE_DA(sclk_fimc3, "exynos4-fimc.3", "sclk_fimc3", "div_fimc3",
-			SRC_MASK_CAM, 12, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
-	GATE_DA(sclk_csis0, "s5p-mipi-csis.0", "sclk_csis0", "div_csis0",
-			SRC_MASK_CAM, 24, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
-	GATE_DA(sclk_csis1, "s5p-mipi-csis.1", "sclk_csis1", "div_csis1",
-			SRC_MASK_CAM, 28, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
-	GATE_DA(sclk_fimd0, "exynos4-fb.0", "sclk_fimd0", "div_fimd0",
-			SRC_MASK_LCD0, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
-	GATE_DA(sclk_mmc0, "exynos4-sdhci.0", "sclk_mmc0", "div_mmc_pre0",
-			SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0,
-			"mmc_busclk.2"),
-	GATE_DA(sclk_mmc1, "exynos4-sdhci.1", "sclk_mmc1", "div_mmc_pre1",
-			SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0,
-			"mmc_busclk.2"),
-	GATE_DA(sclk_mmc2, "exynos4-sdhci.2", "sclk_mmc2", "div_mmc_pre2",
-			SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0,
-			"mmc_busclk.2"),
-	GATE_DA(sclk_mmc3, "exynos4-sdhci.3", "sclk_mmc3", "div_mmc_pre3",
-			SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0,
-			"mmc_busclk.2"),
-	GATE_DA(sclk_mmc4, NULL, "sclk_mmc4", "div_mmc_pre4",
-			SRC_MASK_FSYS, 16, CLK_SET_RATE_PARENT, 0, "ciu"),
-	GATE_DA(sclk_uart0, "exynos4210-uart.0", "uclk0", "div_uart0",
-			SRC_MASK_PERIL0, 0, CLK_SET_RATE_PARENT,
-			0, "clk_uart_baud0"),
-	GATE_DA(sclk_uart1, "exynos4210-uart.1", "uclk1", "div_uart1",
-			SRC_MASK_PERIL0, 4, CLK_SET_RATE_PARENT,
-			0, "clk_uart_baud0"),
-	GATE_DA(sclk_uart2, "exynos4210-uart.2", "uclk2", "div_uart2",
-			SRC_MASK_PERIL0, 8, CLK_SET_RATE_PARENT,
-			0, "clk_uart_baud0"),
-	GATE_DA(sclk_uart3, "exynos4210-uart.3", "uclk3", "div_uart3",
-			SRC_MASK_PERIL0, 12, CLK_SET_RATE_PARENT,
-			0, "clk_uart_baud0"),
-	GATE_DA(sclk_uart4, "exynos4210-uart.4", "uclk4", "div_uart4",
-			SRC_MASK_PERIL0, 16, CLK_SET_RATE_PARENT,
-			0, "clk_uart_baud0"),
+	GATE(vp, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
+	GATE(mixer, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
+	GATE(hdmi, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
+	GATE(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
+	GATE(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
+	GATE(usb_host, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
+	GATE(sclk_fimc0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_fimc1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_fimc2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_fimc3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_csis0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_csis1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_fimd0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_mmc4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_uart0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_uart1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_uart2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_uart3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_uart4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
+			CLK_SET_RATE_PARENT, 0),
 	GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
 			CLK_SET_RATE_PARENT, 0),
-	GATE_DA(sclk_spi0, "exynos4210-spi.0", "sclk_spi0", "div_spi_pre0",
-			SRC_MASK_PERIL1, 16, CLK_SET_RATE_PARENT,
-			0, "spi_busclk0"),
-	GATE_DA(sclk_spi1, "exynos4210-spi.1", "sclk_spi1", "div_spi_pre1",
-			SRC_MASK_PERIL1, 20, CLK_SET_RATE_PARENT,
-			0, "spi_busclk0"),
-	GATE_DA(sclk_spi2, "exynos4210-spi.2", "sclk_spi2", "div_spi_pre2",
-			SRC_MASK_PERIL1, 24, CLK_SET_RATE_PARENT,
-			0, "spi_busclk0"),
-	GATE_DA(fimc0, "exynos4-fimc.0", "fimc0", "aclk160",
-			GATE_IP_CAM, 0, 0, 0, "fimc"),
-	GATE_DA(fimc1, "exynos4-fimc.1", "fimc1", "aclk160",
-			GATE_IP_CAM, 1, 0, 0, "fimc"),
-	GATE_DA(fimc2, "exynos4-fimc.2", "fimc2", "aclk160",
-			GATE_IP_CAM, 2, 0, 0, "fimc"),
-	GATE_DA(fimc3, "exynos4-fimc.3", "fimc3", "aclk160",
-			GATE_IP_CAM, 3, 0, 0, "fimc"),
-	GATE_DA(csis0, "s5p-mipi-csis.0", "csis0", "aclk160",
-			GATE_IP_CAM, 4, 0, 0, "fimc"),
-	GATE_DA(csis1, "s5p-mipi-csis.1", "csis1", "aclk160",
-			GATE_IP_CAM, 5, 0, 0, "fimc"),
-	GATE_DA(smmu_fimc0, "exynos-sysmmu.5", "smmu_fimc0", "aclk160",
-			GATE_IP_CAM, 7, 0, 0, "sysmmu"),
-	GATE_DA(smmu_fimc1, "exynos-sysmmu.6", "smmu_fimc1", "aclk160",
-			GATE_IP_CAM, 8, 0, 0, "sysmmu"),
-	GATE_DA(smmu_fimc2, "exynos-sysmmu.7", "smmu_fimc2", "aclk160",
-			GATE_IP_CAM, 9, 0, 0, "sysmmu"),
-	GATE_DA(smmu_fimc3, "exynos-sysmmu.8", "smmu_fimc3", "aclk160",
-			GATE_IP_CAM, 10, 0, 0, "sysmmu"),
-	GATE_DA(smmu_jpeg, "exynos-sysmmu.3", "smmu_jpeg", "aclk160",
-			GATE_IP_CAM, 11, 0, 0, "sysmmu"),
+	GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(fimc0, "fimc0", "aclk160", GATE_IP_CAM, 0,
+			0, 0),
+	GATE(fimc1, "fimc1", "aclk160", GATE_IP_CAM, 1,
+			0, 0),
+	GATE(fimc2, "fimc2", "aclk160", GATE_IP_CAM, 2,
+			0, 0),
+	GATE(fimc3, "fimc3", "aclk160", GATE_IP_CAM, 3,
+			0, 0),
+	GATE(csis0, "csis0", "aclk160", GATE_IP_CAM, 4,
+			0, 0),
+	GATE(csis1, "csis1", "aclk160", GATE_IP_CAM, 5,
+			0, 0),
+	GATE(smmu_fimc0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7,
+			0, 0),
+	GATE(smmu_fimc1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8,
+			0, 0),
+	GATE(smmu_fimc2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9,
+			0, 0),
+	GATE(smmu_fimc3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10,
+			0, 0),
+	GATE(smmu_jpeg, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
+			0, 0),
 	GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
 	GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
-	GATE_DA(smmu_tv, "exynos-sysmmu.2", "smmu_tv", "aclk160",
-			GATE_IP_TV, 4, 0, 0, "sysmmu"),
-	GATE_DA(mfc, "s5p-mfc", "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0, "mfc"),
-	GATE_DA(smmu_mfcl, "exynos-sysmmu.0", "smmu_mfcl", "aclk100",
-			GATE_IP_MFC, 1, 0, 0, "sysmmu"),
-	GATE_DA(smmu_mfcr, "exynos-sysmmu.1", "smmu_mfcr", "aclk100",
-			GATE_IP_MFC, 2, 0, 0, "sysmmu"),
-	GATE_DA(fimd0, "exynos4-fb.0", "fimd0", "aclk160",
-			GATE_IP_LCD0, 0, 0, 0, "fimd"),
-	GATE_DA(smmu_fimd0, "exynos-sysmmu.10", "smmu_fimd0", "aclk160",
-			GATE_IP_LCD0, 4, 0, 0, "sysmmu"),
-	GATE_DA(pdma0, "dma-pl330.0", "pdma0", "aclk133",
-			GATE_IP_FSYS, 0, 0, 0, "dma"),
-	GATE_DA(pdma1, "dma-pl330.1", "pdma1", "aclk133",
-			GATE_IP_FSYS, 1, 0, 0, "dma"),
-	GATE_DA(sdmmc0, "exynos4-sdhci.0", "sdmmc0", "aclk133",
-			GATE_IP_FSYS, 5, 0, 0, "hsmmc"),
-	GATE_DA(sdmmc1, "exynos4-sdhci.1", "sdmmc1", "aclk133",
-			GATE_IP_FSYS, 6, 0, 0, "hsmmc"),
-	GATE_DA(sdmmc2, "exynos4-sdhci.2", "sdmmc2", "aclk133",
-			GATE_IP_FSYS, 7, 0, 0, "hsmmc"),
-	GATE_DA(sdmmc3, "exynos4-sdhci.3", "sdmmc3", "aclk133",
-			GATE_IP_FSYS, 8, 0, 0, "hsmmc"),
-	GATE_DA(uart0, "exynos4210-uart.0", "uart0", "aclk100",
-			GATE_IP_PERIL, 0, 0, 0, "uart"),
-	GATE_DA(uart1, "exynos4210-uart.1", "uart1", "aclk100",
-			GATE_IP_PERIL, 1, 0, 0, "uart"),
-	GATE_DA(uart2, "exynos4210-uart.2", "uart2", "aclk100",
-			GATE_IP_PERIL, 2, 0, 0, "uart"),
-	GATE_DA(uart3, "exynos4210-uart.3", "uart3", "aclk100",
-			GATE_IP_PERIL, 3, 0, 0, "uart"),
-	GATE_DA(uart4, "exynos4210-uart.4", "uart4", "aclk100",
-			GATE_IP_PERIL, 4, 0, 0, "uart"),
-	GATE_DA(i2c0, "s3c2440-i2c.0", "i2c0", "aclk100",
-			GATE_IP_PERIL, 6, 0, 0, "i2c"),
-	GATE_DA(i2c1, "s3c2440-i2c.1", "i2c1", "aclk100",
-			GATE_IP_PERIL, 7, 0, 0, "i2c"),
-	GATE_DA(i2c2, "s3c2440-i2c.2", "i2c2", "aclk100",
-			GATE_IP_PERIL, 8, 0, 0, "i2c"),
-	GATE_DA(i2c3, "s3c2440-i2c.3", "i2c3", "aclk100",
-			GATE_IP_PERIL, 9, 0, 0, "i2c"),
-	GATE_DA(i2c4, "s3c2440-i2c.4", "i2c4", "aclk100",
-			GATE_IP_PERIL, 10, 0, 0, "i2c"),
-	GATE_DA(i2c5, "s3c2440-i2c.5", "i2c5", "aclk100",
-			GATE_IP_PERIL, 11, 0, 0, "i2c"),
-	GATE_DA(i2c6, "s3c2440-i2c.6", "i2c6", "aclk100",
-			GATE_IP_PERIL, 12, 0, 0, "i2c"),
-	GATE_DA(i2c7, "s3c2440-i2c.7", "i2c7", "aclk100",
-			GATE_IP_PERIL, 13, 0, 0, "i2c"),
-	GATE_DA(i2c_hdmi, "s3c2440-hdmiphy-i2c", "i2c-hdmi", "aclk100",
-			GATE_IP_PERIL, 14, 0, 0, "i2c"),
-	GATE_DA(spi0, "exynos4210-spi.0", "spi0", "aclk100",
-			GATE_IP_PERIL, 16, 0, 0, "spi"),
-	GATE_DA(spi1, "exynos4210-spi.1", "spi1", "aclk100",
-			GATE_IP_PERIL, 17, 0, 0, "spi"),
-	GATE_DA(spi2, "exynos4210-spi.2", "spi2", "aclk100",
-			GATE_IP_PERIL, 18, 0, 0, "spi"),
-	GATE_DA(i2s1, "samsung-i2s.1", "i2s1", "aclk100",
-			GATE_IP_PERIL, 20, 0, 0, "iis"),
-	GATE_DA(i2s2, "samsung-i2s.2", "i2s2", "aclk100",
-			GATE_IP_PERIL, 21, 0, 0, "iis"),
-	GATE_DA(pcm1, "samsung-pcm.1", "pcm1", "aclk100",
-			GATE_IP_PERIL, 22, 0, 0, "pcm"),
-	GATE_DA(pcm2, "samsung-pcm.2", "pcm2", "aclk100",
-			GATE_IP_PERIL, 23, 0, 0, "pcm"),
-	GATE_DA(spdif, "samsung-spdif", "spdif", "aclk100",
-			GATE_IP_PERIL, 26, 0, 0, "spdif"),
-	GATE_DA(ac97, "samsung-ac97", "ac97", "aclk100",
-			GATE_IP_PERIL, 27, 0, 0, "ac97"),
+	GATE(smmu_tv, "smmu_tv", "aclk160", GATE_IP_TV, 4,
+			0, 0),
+	GATE(mfc, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
+	GATE(smmu_mfcl, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
+			0, 0),
+	GATE(smmu_mfcr, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
+			0, 0),
+	GATE(fimd0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
+			0, 0),
+	GATE(smmu_fimd0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
+			0, 0),
+	GATE(pdma0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
+			0, 0),
+	GATE(pdma1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
+			0, 0),
+	GATE(sdmmc0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5,
+			0, 0),
+	GATE(sdmmc1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6,
+			0, 0),
+	GATE(sdmmc2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7,
+			0, 0),
+	GATE(sdmmc3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
+			0, 0),
+	GATE(uart0, "uart0", "aclk100", GATE_IP_PERIL, 0,
+			0, 0),
+	GATE(uart1, "uart1", "aclk100", GATE_IP_PERIL, 1,
+			0, 0),
+	GATE(uart2, "uart2", "aclk100", GATE_IP_PERIL, 2,
+			0, 0),
+	GATE(uart3, "uart3", "aclk100", GATE_IP_PERIL, 3,
+			0, 0),
+	GATE(uart4, "uart4", "aclk100", GATE_IP_PERIL, 4,
+			0, 0),
+	GATE(i2c0, "i2c0", "aclk100", GATE_IP_PERIL, 6,
+			0, 0),
+	GATE(i2c1, "i2c1", "aclk100", GATE_IP_PERIL, 7,
+			0, 0),
+	GATE(i2c2, "i2c2", "aclk100", GATE_IP_PERIL, 8,
+			0, 0),
+	GATE(i2c3, "i2c3", "aclk100", GATE_IP_PERIL, 9,
+			0, 0),
+	GATE(i2c4, "i2c4", "aclk100", GATE_IP_PERIL, 10,
+			0, 0),
+	GATE(i2c5, "i2c5", "aclk100", GATE_IP_PERIL, 11,
+			0, 0),
+	GATE(i2c6, "i2c6", "aclk100", GATE_IP_PERIL, 12,
+			0, 0),
+	GATE(i2c7, "i2c7", "aclk100", GATE_IP_PERIL, 13,
+			0, 0),
+	GATE(i2c_hdmi, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14,
+			0, 0),
+	GATE(spi0, "spi0", "aclk100", GATE_IP_PERIL, 16,
+			0, 0),
+	GATE(spi1, "spi1", "aclk100", GATE_IP_PERIL, 17,
+			0, 0),
+	GATE(spi2, "spi2", "aclk100", GATE_IP_PERIL, 18,
+			0, 0),
+	GATE(i2s1, "i2s1", "aclk100", GATE_IP_PERIL, 20,
+			0, 0),
+	GATE(i2s2, "i2s2", "aclk100", GATE_IP_PERIL, 21,
+			0, 0),
+	GATE(pcm1, "pcm1", "aclk100", GATE_IP_PERIL, 22,
+			0, 0),
+	GATE(pcm2, "pcm2", "aclk100", GATE_IP_PERIL, 23,
+			0, 0),
+	GATE(spdif, "spdif", "aclk100", GATE_IP_PERIL, 26,
+			0, 0),
+	GATE(ac97, "ac97", "aclk100", GATE_IP_PERIL, 27,
+			0, 0),
 };
 
 /* list of gate clocks supported in exynos4210 soc */
-struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
 	GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
 	GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
 	GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
@@ -811,17 +803,23 @@
 			SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
 	GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
 	GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
-	GATE_A(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15, 0, 0, "adc"),
-	GATE_A(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13, 0, 0, "mct"),
-	GATE_A(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
-	GATE_A(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15, 0, 0, "rtc"),
-	GATE_A(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16, 0, 0, "keypad"),
-	GATE_DA(sclk_fimd1, "exynos4-fb.1", "sclk_fimd1", "div_fimd1",
-			E4210_SRC_MASK_LCD1, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
+	GATE(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15,
+			0, 0),
+	GATE(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13,
+			0, 0),
+	GATE(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14,
+			0, 0),
+	GATE(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15,
+			0, 0),
+	GATE(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16,
+			0, 0),
+	GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
+			CLK_SET_RATE_PARENT, 0),
+	GATE(tmu_apbif, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0, 0),
 };
 
 /* list of gate clocks supported in exynos4x12 soc */
-struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
 	GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
 	GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
 	GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
@@ -840,10 +838,11 @@
 			SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
 	GATE(smmu_rotator, "smmu_rotator", "aclk200",
 			E4X12_GATE_IP_IMAGE, 4, 0, 0),
-	GATE_A(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13, 0, 0, "mct"),
-	GATE_A(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0, "rtc"),
-	GATE_A(keyif, "keyif", "aclk100",
-			E4X12_GATE_IP_PERIR, 16, 0, 0, "keypad"),
+	GATE(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13,
+			0, 0),
+	GATE(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
+			0, 0),
+	GATE(keyif, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
 	GATE(sclk_pwm_isp, "sclk_pwm_isp", "div_pwm_isp",
 			E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
 	GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre",
@@ -860,12 +859,11 @@
 			E4X12_GATE_IP_ISP, 2, 0, 0),
 	GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp",
 			E4X12_GATE_IP_ISP, 3, 0, 0),
-	GATE_A(wdt, "watchdog", "aclk100",
-			E4X12_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
-	GATE_DA(pcm0, "samsung-pcm.0", "pcm0", "aclk100",
-			E4X12_GATE_IP_MAUDIO, 2, 0, 0, "pcm"),
-	GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
-			E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
+	GATE(wdt, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
+	GATE(pcm0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
+			0, 0),
+	GATE(i2s0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
+			0, 0),
 	GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
 			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
@@ -919,6 +917,21 @@
 	GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
 			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+	GATE(tmu_apbif, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0, 0),
+};
+
+static struct samsung_clock_alias exynos4_aliases[] __initdata = {
+	ALIAS(mout_core, NULL, "moutcore"),
+	ALIAS(arm_clk, NULL, "armclk"),
+	ALIAS(sclk_apll, NULL, "mout_apll"),
+};
+
+static struct samsung_clock_alias exynos4210_aliases[] __initdata = {
+	ALIAS(sclk_mpll, NULL, "mout_mpll"),
+};
+
+static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
+	ALIAS(mout_mpll_user_c, NULL, "mout_mpll"),
 };
 
 /*
@@ -973,36 +986,116 @@
 
 }
 
-/*
- * This function allows non-dt platforms to specify the clock speed of the
- * xxti and xusbxti clocks. These clocks are then registered with the specified
- * clock speed.
- */
-void __init exynos4_clk_register_fixed_ext(unsigned long xxti_f,
-						unsigned long xusbxti_f)
-{
-	exynos4_fixed_rate_ext_clks[0].fixed_rate = xxti_f;
-	exynos4_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
-	samsung_clk_register_fixed_rate(exynos4_fixed_rate_ext_clks,
-			ARRAY_SIZE(exynos4_fixed_rate_ext_clks));
-}
-
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
 	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
 	{ .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
 	{},
 };
 
-/* register exynos4 clocks */
-void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_soc, void __iomem *reg_base, unsigned long xom)
-{
-	struct clk *apll, *mpll, *epll, *vpll;
+/* PLLs PMS values */
+static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = {
+	PLL_45XX_RATE(1200000000, 150,  3, 1, 28),
+	PLL_45XX_RATE(1000000000, 250,  6, 1, 28),
+	PLL_45XX_RATE( 800000000, 200,  6, 1, 28),
+	PLL_45XX_RATE( 666857142, 389, 14, 1, 13),
+	PLL_45XX_RATE( 600000000, 100,  4, 1, 13),
+	PLL_45XX_RATE( 533000000, 533, 24, 1,  5),
+	PLL_45XX_RATE( 500000000, 250,  6, 2, 28),
+	PLL_45XX_RATE( 400000000, 200,  6, 2, 28),
+	PLL_45XX_RATE( 200000000, 200,  6, 3, 28),
+	{ /* sentinel */ }
+};
 
-	if (np) {
-		reg_base = of_iomap(np, 0);
-		if (!reg_base)
-			panic("%s: failed to map registers\n", __func__);
-	}
+static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = {
+	PLL_4600_RATE(192000000, 48, 3, 1,     0, 0),
+	PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0),
+	PLL_4600_RATE(180000000, 45, 3, 1,     0, 0),
+	PLL_4600_RATE( 73727996, 73, 3, 3, 47710, 1),
+	PLL_4600_RATE( 67737602, 90, 4, 3, 20762, 1),
+	PLL_4600_RATE( 49151992, 49, 3, 3,  9961, 0),
+	PLL_4600_RATE( 45158401, 45, 3, 3, 10381, 0),
+	{ /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = {
+	PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0),
+	PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1,  1, 1),
+	PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1),
+	PLL_4650_RATE(110000000, 53, 3, 2, 2048, 0, 17, 0),
+	PLL_4650_RATE( 55360351, 53, 3, 3, 2417, 0, 17, 0),
+	{ /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = {
+	PLL_35XX_RATE(1500000000, 250, 4, 0),
+	PLL_35XX_RATE(1400000000, 175, 3, 0),
+	PLL_35XX_RATE(1300000000, 325, 6, 0),
+	PLL_35XX_RATE(1200000000, 200, 4, 0),
+	PLL_35XX_RATE(1100000000, 275, 6, 0),
+	PLL_35XX_RATE(1000000000, 125, 3, 0),
+	PLL_35XX_RATE( 900000000, 150, 4, 0),
+	PLL_35XX_RATE( 800000000, 100, 3, 0),
+	PLL_35XX_RATE( 700000000, 175, 3, 1),
+	PLL_35XX_RATE( 600000000, 200, 4, 1),
+	PLL_35XX_RATE( 500000000, 125, 3, 1),
+	PLL_35XX_RATE( 400000000, 100, 3, 1),
+	PLL_35XX_RATE( 300000000, 200, 4, 2),
+	PLL_35XX_RATE( 200000000, 100, 3, 2),
+	{ /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = {
+	PLL_36XX_RATE(192000000, 48, 3, 1,     0),
+	PLL_36XX_RATE(180633605, 45, 3, 1, 10381),
+	PLL_36XX_RATE(180000000, 45, 3, 1,     0),
+	PLL_36XX_RATE( 73727996, 73, 3, 3, 47710),
+	PLL_36XX_RATE( 67737602, 90, 4, 3, 20762),
+	PLL_36XX_RATE( 49151992, 49, 3, 3,  9961),
+	PLL_36XX_RATE( 45158401, 45, 3, 3, 10381),
+	{ /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = {
+	PLL_36XX_RATE(533000000, 133, 3, 1, 16384),
+	PLL_36XX_RATE(440000000, 110, 3, 1,     0),
+	PLL_36XX_RATE(350000000, 175, 3, 2,     0),
+	PLL_36XX_RATE(266000000, 133, 3, 2,     0),
+	PLL_36XX_RATE(160000000, 160, 3, 3,     0),
+	PLL_36XX_RATE(106031250,  53, 3, 2,  1024),
+	PLL_36XX_RATE( 53015625,  53, 3, 3,  1024),
+	{ /* sentinel */ }
+};
+
+static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
+	[apll] = PLL_A(pll_4508, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+		APLL_CON0, "fout_apll", NULL),
+	[mpll] = PLL_A(pll_4508, fout_mpll, "fout_mpll", "fin_pll",
+		E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
+	[epll] = PLL_A(pll_4600, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+		EPLL_CON0, "fout_epll", NULL),
+	[vpll] = PLL_A(pll_4650c, fout_vpll, "fout_vpll", "mout_vpllsrc",
+		VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
+};
+
+static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
+	[apll] = PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
+			APLL_LOCK, APLL_CON0, NULL),
+	[mpll] = PLL(pll_35xx, fout_mpll, "fout_mpll", "fin_pll",
+			E4X12_MPLL_LOCK, E4X12_MPLL_CON0, NULL),
+	[epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll",
+			EPLL_LOCK, EPLL_CON0, NULL),
+	[vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "fin_pll",
+			VPLL_LOCK, VPLL_CON0, NULL),
+};
+
+/* register exynos4 clocks */
+static void __init exynos4_clk_init(struct device_node *np,
+				    enum exynos4_soc exynos4_soc,
+				    void __iomem *reg_base, unsigned long xom)
+{
+	reg_base = of_iomap(np, 0);
+	if (!reg_base)
+		panic("%s: failed to map registers\n", __func__);
 
 	if (exynos4_soc == EXYNOS4210)
 		samsung_clk_init(np, reg_base, nr_clks,
@@ -1013,37 +1106,42 @@
 			exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
 			exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
 
-	if (np)
-		samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
+	samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
 			ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
 			ext_clk_match);
 
 	exynos4_clk_register_finpll(xom);
 
 	if (exynos4_soc == EXYNOS4210) {
-		apll = samsung_clk_register_pll45xx("fout_apll", "fin_pll",
-					reg_base + APLL_CON0, pll_4508);
-		mpll = samsung_clk_register_pll45xx("fout_mpll", "fin_pll",
-					reg_base + E4210_MPLL_CON0, pll_4508);
-		epll = samsung_clk_register_pll46xx("fout_epll", "fin_pll",
-					reg_base + EPLL_CON0, pll_4600);
-		vpll = samsung_clk_register_pll46xx("fout_vpll", "mout_vpllsrc",
-					reg_base + VPLL_CON0, pll_4650c);
-	} else {
-		apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
-					reg_base + APLL_CON0);
-		mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
-					reg_base + E4X12_MPLL_CON0);
-		epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
-					reg_base + EPLL_CON0);
-		vpll = samsung_clk_register_pll36xx("fout_vpll", "fin_pll",
-					reg_base + VPLL_CON0);
-	}
+		samsung_clk_register_mux(exynos4210_mux_early,
+					ARRAY_SIZE(exynos4210_mux_early));
 
-	samsung_clk_add_lookup(apll, fout_apll);
-	samsung_clk_add_lookup(mpll, fout_mpll);
-	samsung_clk_add_lookup(epll, fout_epll);
-	samsung_clk_add_lookup(vpll, fout_vpll);
+		if (_get_rate("fin_pll") == 24000000) {
+			exynos4210_plls[apll].rate_table =
+							exynos4210_apll_rates;
+			exynos4210_plls[epll].rate_table =
+							exynos4210_epll_rates;
+		}
+
+		if (_get_rate("mout_vpllsrc") == 24000000)
+			exynos4210_plls[vpll].rate_table =
+							exynos4210_vpll_rates;
+
+		samsung_clk_register_pll(exynos4210_plls,
+					ARRAY_SIZE(exynos4210_plls), reg_base);
+	} else {
+		if (_get_rate("fin_pll") == 24000000) {
+			exynos4x12_plls[apll].rate_table =
+							exynos4x12_apll_rates;
+			exynos4x12_plls[epll].rate_table =
+							exynos4x12_epll_rates;
+			exynos4x12_plls[vpll].rate_table =
+							exynos4x12_vpll_rates;
+		}
+
+		samsung_clk_register_pll(exynos4x12_plls,
+					ARRAY_SIZE(exynos4x12_plls), reg_base);
+	}
 
 	samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks,
 			ARRAY_SIZE(exynos4_fixed_rate_clks));
@@ -1063,6 +1161,8 @@
 			ARRAY_SIZE(exynos4210_div_clks));
 		samsung_clk_register_gate(exynos4210_gate_clks,
 			ARRAY_SIZE(exynos4210_gate_clks));
+		samsung_clk_register_alias(exynos4210_aliases,
+			ARRAY_SIZE(exynos4210_aliases));
 	} else {
 		samsung_clk_register_mux(exynos4x12_mux_clks,
 			ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1070,14 +1170,19 @@
 			ARRAY_SIZE(exynos4x12_div_clks));
 		samsung_clk_register_gate(exynos4x12_gate_clks,
 			ARRAY_SIZE(exynos4x12_gate_clks));
+		samsung_clk_register_alias(exynos4x12_aliases,
+			ARRAY_SIZE(exynos4x12_aliases));
 	}
 
+	samsung_clk_register_alias(exynos4_aliases,
+			ARRAY_SIZE(exynos4_aliases));
+
 	pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
 		"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
 		exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
-		_get_rate("sclk_apll"),	_get_rate("mout_mpll"),
+		_get_rate("sclk_apll"),	_get_rate("sclk_mpll"),
 		_get_rate("sclk_epll"), _get_rate("sclk_vpll"),
-		_get_rate("armclk"));
+		_get_rate("arm_clk"));
 }
 
 
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 6f767c5..adf3234 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -17,11 +17,22 @@
 #include <linux/of_address.h>
 
 #include "clk.h"
-#include "clk-pll.h"
 
+#define APLL_LOCK		0x0
+#define APLL_CON0		0x100
 #define SRC_CPU			0x200
 #define DIV_CPU0		0x500
+#define MPLL_LOCK		0x4000
+#define MPLL_CON0		0x4100
 #define SRC_CORE1		0x4204
+#define CPLL_LOCK		0x10020
+#define EPLL_LOCK		0x10030
+#define VPLL_LOCK		0x10040
+#define GPLL_LOCK		0x10050
+#define CPLL_CON0		0x10120
+#define EPLL_CON0		0x10130
+#define VPLL_CON0		0x10140
+#define GPLL_CON0		0x10150
 #define SRC_TOP0		0x10210
 #define SRC_TOP2		0x10218
 #define SRC_GSCL		0x10220
@@ -59,9 +70,18 @@
 #define GATE_IP_FSYS		0x10944
 #define GATE_IP_PERIC		0x10950
 #define GATE_IP_PERIS		0x10960
+#define BPLL_LOCK		0x20010
+#define BPLL_CON0		0x20110
 #define SRC_CDREX		0x20200
 #define PLL_DIV2_SEL		0x20a24
 #define GATE_IP_DISP1		0x10928
+#define GATE_IP_ACP		0x10000
+
+/* list of PLLs to be registered */
+enum exynos5250_plls {
+	apll, mpll, cpll, epll, vpll, gpll, bpll,
+	nr_plls			/* number of PLLs */
+};
 
 /*
  * Let each supported clock get a unique id. This id is used to lookup the clock
@@ -79,7 +99,8 @@
 	none,
 
 	/* core clocks */
-	fin_pll,
+	fin_pll, fout_apll, fout_mpll, fout_bpll, fout_gpll, fout_cpll,
+	fout_epll, fout_vpll,
 
 	/* gate for special clocks (sclk) */
 	sclk_cam_bayer = 128, sclk_cam0, sclk_cam1, sclk_gscl_wa, sclk_gscl_wb,
@@ -87,7 +108,7 @@
 	sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
 	sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
 	sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
-	div_i2s1, div_i2s2,
+	div_i2s1, div_i2s2, sclk_hdmiphy,
 
 	/* gate clocks */
 	gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
@@ -99,7 +120,10 @@
 	spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
 	hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
 	tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
-	wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi,
+	wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+
+	/* mux clocks */
+	mout_hdmi = 1024,
 
 	nr_clks,
 };
@@ -108,7 +132,7 @@
  * list of controller registers to be saved and restored during a
  * suspend/resume cycle.
  */
-static __initdata unsigned long exynos5250_clk_regs[] = {
+static unsigned long exynos5250_clk_regs[] __initdata = {
 	SRC_CPU,
 	DIV_CPU0,
 	SRC_CORE1,
@@ -152,6 +176,7 @@
 	SRC_CDREX,
 	PLL_DIV2_SEL,
 	GATE_IP_DISP1,
+	GATE_IP_ACP,
 };
 
 /* list of all parent clock list */
@@ -191,31 +216,34 @@
 				"spdif_extclk" };
 
 /* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
 	FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
 };
 
 /* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
-	FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
+	FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
 	FRATE(none, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
 	FRATE(none, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
 	FRATE(none, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
 };
 
-struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
 	FFACTOR(none, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
 	FFACTOR(none, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
 };
 
-struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = {
+	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+};
+
+static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
 	MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
 	MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
 	MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
 	MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
 	MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
 	MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
-	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
 	MUX(none, "sclk_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
 	MUX(none, "sclk_epll", mout_epll_p, SRC_TOP2, 12, 1),
 	MUX(none, "sclk_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
@@ -232,7 +260,7 @@
 	MUX(none, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
 	MUX(none, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
 	MUX(none, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
-	MUX(none, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
+	MUX(mout_hdmi, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
 	MUX(none, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
 	MUX(none, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
 	MUX(none, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
@@ -254,7 +282,7 @@
 	MUX(none, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
 };
 
-struct samsung_div_clock exynos5250_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
 	DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
 	DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
 	DIV(none, "aclk66_pre", "sclk_mpll_user", DIV_TOP1, 24, 3),
@@ -314,7 +342,7 @@
 			DIV_PERIC2, 8, 8, CLK_SET_RATE_PARENT, 0),
 };
 
-struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 	GATE(gscl0, "gscl0", "none", GATE_IP_GSCL, 0, 0, 0),
 	GATE(gscl1, "gscl1", "none", GATE_IP_GSCL, 1, 0, 0),
 	GATE(gscl2, "gscl2", "aclk266", GATE_IP_GSCL, 2, 0, 0),
@@ -461,20 +489,60 @@
 	GATE(mie1, "mie1", "aclk200", GATE_IP_DISP1, 1, 0, 0),
 	GATE(dsim0, "dsim0", "aclk200", GATE_IP_DISP1, 3, 0, 0),
 	GATE(dp, "dp", "aclk200", GATE_IP_DISP1, 4, 0, 0),
-	GATE(mixer, "mixer", "aclk200", GATE_IP_DISP1, 5, 0, 0),
-	GATE(hdmi, "hdmi", "aclk200", GATE_IP_DISP1, 6, 0, 0),
+	GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
+	GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+	GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
 };
 
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
+	/* sorted in descending order */
+	/* PLL_36XX_RATE(rate, m, p, s, k) */
+	PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+	/* Not in UM, but need for eDP on snow */
+	PLL_36XX_RATE(70500000, 94, 2, 4, 0),
+	{ },
+};
+
+static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
+	/* sorted in descending order */
+	/* PLL_36XX_RATE(rate, m, p, s, k) */
+	PLL_36XX_RATE(192000000, 64, 2, 2, 0),
+	PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
+	PLL_36XX_RATE(180000000, 90, 3, 2, 0),
+	PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
+	PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
+	PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
+	PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
+	PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+	{ },
+};
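+
+/*
+ * Quick check of the first entry against the 24 MHz fin_pll this table is
+ * gated on below: 64 * 24 MHz / (2 * 2^2) = 192 MHz.
+ */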
+
+static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
+	[apll] = PLL_A(pll_35xx, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+		APLL_CON0, "fout_apll", NULL),
+	[mpll] = PLL_A(pll_35xx, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+		MPLL_CON0, "fout_mpll", NULL),
+	[bpll] = PLL(pll_35xx, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+		BPLL_CON0, NULL),
+	[gpll] = PLL(pll_35xx, fout_gpll, "fout_gpll", "fin_pll", GPLL_LOCK,
+		GPLL_CON0, NULL),
+	[cpll] = PLL(pll_35xx, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+		CPLL_CON0, NULL),
+	[epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+		EPLL_CON0, NULL),
+	[vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "mout_vpllsrc",
+		VPLL_LOCK, VPLL_CON0, NULL),
+};
+
+static struct of_device_id ext_clk_match[] __initdata = {
 	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
 	{ },
 };
 
 /* register exynos5250 clocks */
-void __init exynos5250_clk_init(struct device_node *np)
+static void __init exynos5250_clk_init(struct device_node *np)
 {
 	void __iomem *reg_base;
-	struct clk *apll, *mpll, *epll, *vpll, *bpll, *gpll, *cpll;
 
 	if (np) {
 		reg_base = of_iomap(np, 0);
@@ -490,22 +558,17 @@
 	samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
 			ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
 			ext_clk_match);
+	samsung_clk_register_mux(exynos5250_pll_pmux_clks,
+				ARRAY_SIZE(exynos5250_pll_pmux_clks));
 
-	apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
-			reg_base + 0x100);
-	mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
-			reg_base + 0x4100);
-	bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
-			reg_base + 0x20110);
-	gpll = samsung_clk_register_pll35xx("fout_gpll", "fin_pll",
-			reg_base + 0x10150);
-	cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
-			reg_base + 0x10120);
-	epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
-			reg_base + 0x10130);
-	vpll = samsung_clk_register_pll36xx("fout_vpll", "mout_vpllsrc",
-			reg_base + 0x10140);
+	if (_get_rate("fin_pll") == 24 * MHZ)
+		exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
 
+	if (_get_rate("mout_vpllsrc") == 24 * MHZ)
+		exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
+
+	samsung_clk_register_pll(exynos5250_plls, ARRAY_SIZE(exynos5250_plls),
+					reg_base);
 	samsung_clk_register_fixed_rate(exynos5250_fixed_rate_clks,
 			ARRAY_SIZE(exynos5250_fixed_rate_clks));
 	samsung_clk_register_fixed_factor(exynos5250_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 68a96cb..48c4a93 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -17,13 +17,30 @@
 #include <linux/of_address.h>
 
 #include "clk.h"
-#include "clk-pll.h"
 
+#define APLL_LOCK		0x0
+#define APLL_CON0		0x100
 #define SRC_CPU			0x200
 #define DIV_CPU0		0x500
 #define DIV_CPU1		0x504
 #define GATE_BUS_CPU		0x700
 #define GATE_SCLK_CPU		0x800
+#define CPLL_LOCK		0x10020
+#define DPLL_LOCK		0x10030
+#define EPLL_LOCK		0x10040
+#define RPLL_LOCK		0x10050
+#define IPLL_LOCK		0x10060
+#define SPLL_LOCK		0x10070
+#define VPLL_LOCK		0x10080
+#define MPLL_LOCK		0x10090
+#define CPLL_CON0		0x10120
+#define DPLL_CON0		0x10128
+#define EPLL_CON0		0x10130
+#define RPLL_CON0		0x10140
+#define IPLL_CON0		0x10150
+#define SPLL_CON0		0x10160
+#define VPLL_CON0		0x10170
+#define MPLL_CON0		0x10180
 #define SRC_TOP0		0x10200
 #define SRC_TOP1		0x10204
 #define SRC_TOP2		0x10208
@@ -75,15 +92,27 @@
 #define GATE_TOP_SCLK_MAU	0x1083c
 #define GATE_TOP_SCLK_FSYS	0x10840
 #define GATE_TOP_SCLK_PERIC	0x10850
+#define BPLL_LOCK		0x20010
+#define BPLL_CON0		0x20110
 #define SRC_CDREX		0x20200
+#define KPLL_LOCK		0x28000
+#define KPLL_CON0		0x28100
 #define SRC_KFC			0x28200
 #define DIV_KFC0		0x28500
 
+/* list of PLLs */
+enum exynos5420_plls {
+	apll, cpll, dpll, epll, rpll, ipll, spll, vpll, mpll,
+	bpll, kpll,
+	nr_plls			/* number of PLLs */
+};
+
 enum exynos5420_clks {
 	none,
 
 	/* core clocks */
-	fin_pll,
+	fin_pll, fout_apll, fout_cpll, fout_dpll, fout_epll, fout_rpll,
+	fout_ipll, fout_spll, fout_vpll, fout_mpll, fout_bpll, fout_kpll,
 
 	/* gate for special clocks (sclk) */
 	sclk_uart0 = 128, sclk_uart1, sclk_uart2, sclk_uart3, sclk_mmc0,
@@ -91,7 +120,7 @@
 	sclk_i2s2, sclk_pcm1, sclk_pcm2, sclk_spdif, sclk_hdmi, sclk_pixel,
 	sclk_dp1, sclk_mipi1, sclk_fimd1, sclk_maudio0, sclk_maupcm0,
 	sclk_usbd300, sclk_usbd301, sclk_usbphy300, sclk_usbphy301, sclk_unipro,
-	sclk_pwm, sclk_gscl_wa, sclk_gscl_wb,
+	sclk_pwm, sclk_gscl_wa, sclk_gscl_wb, sclk_hdmiphy,
 
 	/* gate clocks */
 	aclk66_peric = 256, uart0, uart1, uart2, uart3, i2c0, i2c1, i2c2, i2c3,
@@ -109,7 +138,13 @@
 	aclk300_gscl = 460, smmu_gscl0, smmu_gscl1, gscl_wa, gscl_wb, gscl0,
 	gscl1, clk_3aa, aclk266_g2d = 470, sss, slim_sss, mdma0,
 	aclk333_g2d = 480, g2d, aclk333_432_gscl = 490, smmu_3aa, smmu_fimcl0,
-	smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d,
+	smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d, smmu_mixer,
+
+	/* mux clocks */
+	mout_hdmi = 640,
+
+	/* divider clocks */
+	dout_pixel = 768,
 
 	nr_clks,
 };
@@ -118,7 +153,7 @@
  * list of controller registers to be saved and restored during a
  * suspend/resume cycle.
  */
-static __initdata unsigned long exynos5420_clk_regs[] = {
+static unsigned long exynos5420_clk_regs[] __initdata = {
 	SRC_CPU,
 	DIV_CPU0,
 	DIV_CPU1,
@@ -257,29 +292,29 @@
 		  "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
 PNAME(spdif_p)	= { "fin_pll", "dout_audio0", "dout_audio1", "dout_audio2",
 		  "spdif_extclk", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(hdmi_p)	= { "sclk_hdmiphy", "dout_hdmi_pixel" };
+PNAME(hdmi_p)	= { "dout_hdmi_pixel", "sclk_hdmiphy" };
 PNAME(maudio0_p)	= { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll",
 			  "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
 
 /* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
 	FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
 };
 
 /* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
-	FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
+	FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
 	FRATE(none, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
 	FRATE(none, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
 	FRATE(none, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
 	FRATE(none, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
 };
 
-struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
 	FFACTOR(none, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
 };
 
-struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
 	MUX(none, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
 	MUX(none, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
 	MUX(none, "mout_apll", apll_p, SRC_CPU, 0, 1),
@@ -371,7 +406,7 @@
 	MUX(none, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
 	MUX(none, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
 	MUX(none, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
-	MUX(none, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
+	MUX(mout_hdmi, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
 
 	/* MAU Block */
 	MUX(none, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
@@ -399,7 +434,7 @@
 	MUX(none, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
 };
 
-struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
 	DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
 	DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
 	DIV(none, "armclk2", "div_arm", DIV_CPU0, 28, 3),
@@ -431,7 +466,7 @@
 	DIV(none, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
 	DIV(none, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
 	DIV(none, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
-	DIV(none, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
+	DIV(dout_pixel, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
 
 	/* Audio Block */
 	DIV(none, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
@@ -479,7 +514,7 @@
 	DIV(none, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
 };
 
-struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
 	/* TODO: Re-verify the CG bits for all the gate clocks */
 	GATE_A(mct, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0, "mct"),
 
@@ -696,19 +731,43 @@
 	GATE(smmu_mscl0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0, 0),
 	GATE(smmu_mscl1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0, 0),
 	GATE(smmu_mscl2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0, 0),
+	GATE(smmu_mixer, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0, 0),
 };
 
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = {
+	[apll] = PLL(pll_2550, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+		APLL_CON0, NULL),
+	[cpll] = PLL(pll_2550, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+		CPLL_CON0, NULL),
+	[dpll] = PLL(pll_2550, fout_dpll, "fout_dpll", "fin_pll", DPLL_LOCK,
+		DPLL_CON0, NULL),
+	[epll] = PLL(pll_2650, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+		EPLL_CON0, NULL),
+	[rpll] = PLL(pll_2650, fout_rpll, "fout_rpll", "fin_pll", RPLL_LOCK,
+		RPLL_CON0, NULL),
+	[ipll] = PLL(pll_2550, fout_ipll, "fout_ipll", "fin_pll", IPLL_LOCK,
+		IPLL_CON0, NULL),
+	[spll] = PLL(pll_2550, fout_spll, "fout_spll", "fin_pll", SPLL_LOCK,
+		SPLL_CON0, NULL),
+	[vpll] = PLL(pll_2550, fout_vpll, "fout_vpll", "fin_pll", VPLL_LOCK,
+		VPLL_CON0, NULL),
+	[mpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+		MPLL_CON0, NULL),
+	[bpll] = PLL(pll_2550, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+		BPLL_CON0, NULL),
+	[kpll] = PLL(pll_2550, fout_kpll, "fout_kpll", "fin_pll", KPLL_LOCK,
+		KPLL_CON0, NULL),
+};
+
+static struct of_device_id ext_clk_match[] __initdata = {
 	{ .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
 	{ },
 };
 
 /* register exynos5420 clocks */
-void __init exynos5420_clk_init(struct device_node *np)
+static void __init exynos5420_clk_init(struct device_node *np)
 {
 	void __iomem *reg_base;
-	struct clk *apll, *bpll, *cpll, *dpll, *epll, *ipll, *kpll, *mpll;
-	struct clk *rpll, *spll, *vpll;
 
 	if (np) {
 		reg_base = of_iomap(np, 0);
@@ -724,30 +783,8 @@
 	samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
 			ARRAY_SIZE(exynos5420_fixed_rate_ext_clks),
 			ext_clk_match);
-
-	apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
-			reg_base + 0x100);
-	bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
-			reg_base + 0x20110);
-	cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
-			reg_base + 0x10120);
-	dpll = samsung_clk_register_pll35xx("fout_dpll", "fin_pll",
-			reg_base + 0x10128);
-	epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
-			reg_base + 0x10130);
-	ipll = samsung_clk_register_pll35xx("fout_ipll", "fin_pll",
-			reg_base + 0x10150);
-	kpll = samsung_clk_register_pll35xx("fout_kpll", "fin_pll",
-			reg_base + 0x28100);
-	mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
-			reg_base + 0x10180);
-	rpll = samsung_clk_register_pll36xx("fout_rpll", "fin_pll",
-			reg_base + 0x10140);
-	spll = samsung_clk_register_pll35xx("fout_spll", "fin_pll",
-			reg_base + 0x10160);
-	vpll = samsung_clk_register_pll35xx("fout_vpll", "fin_pll",
-			reg_base + 0x10170);
-
+	samsung_clk_register_pll(exynos5420_plls, ARRAY_SIZE(exynos5420_plls),
+					reg_base);
 	samsung_clk_register_fixed_rate(exynos5420_fixed_rate_clks,
 			ARRAY_SIZE(exynos5420_fixed_rate_clks));
 	samsung_clk_register_fixed_factor(exynos5420_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 7d54341..f865894 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -41,12 +41,12 @@
 PNAME(mout_spi_p)	= { "div125", "div200" };
 
 /* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
 	FRATE(none, "xtal", NULL, CLK_IS_ROOT, 0),
 };
 
 /* fixed rate clocks */
-struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
 	FRATE(none, "ppll", NULL, CLK_IS_ROOT, 1000000000),
 	FRATE(none, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
 	FRATE(none, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
@@ -55,26 +55,26 @@
 };
 
 /* fixed factor clocks */
-struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
 	FFACTOR(none, "div250", "ppll", 1, 4, 0),
 	FFACTOR(none, "div200", "ppll", 1, 5, 0),
 	FFACTOR(none, "div125", "div250", 1, 2, 0),
 };
 
 /* mux clocks */
-struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
 	MUX(none, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
 	MUX_A(arm_clk, "arm_clk", mout_armclk_p,
 			CPU_CLK_STATUS, 0, 1, "armclk"),
 };
 
 /* divider clocks */
-struct samsung_div_clock exynos5440_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5440_div_clks[] __initdata = {
 	DIV(spi_baud, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
 };
 
 /* gate clocks */
-struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
 	GATE(pb0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
 	GATE(pr0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
 	GATE(pr1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
@@ -97,13 +97,13 @@
 	GATE(cs250_o, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
 };
 
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
 	{ .compatible = "samsung,clock-xtal", .data = (void *)0, },
 	{},
 };
 
 /* register exynos5440 clocks */
-void __init exynos5440_clk_init(struct device_node *np)
+static void __init exynos5440_clk_init(struct device_node *np)
 {
 	void __iomem *reg_base;
 
@@ -132,7 +132,7 @@
 	samsung_clk_register_gate(exynos5440_gate_clks,
 			ARRAY_SIZE(exynos5440_gate_clks));
 
-	pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("armclk"));
+	pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
 	pr_info("exynos5440 clock initialization complete\n");
 }
 CLK_OF_DECLARE(exynos5440_clk, "samsung,exynos5440-clock", exynos5440_clk_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 362f12d..529e11d 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -10,31 +10,73 @@
 */
 
 #include <linux/errno.h>
+#include <linux/hrtimer.h>
 #include "clk.h"
 #include "clk-pll.h"
 
+#define PLL_TIMEOUT_MS		10
+
+struct samsung_clk_pll {
+	struct clk_hw		hw;
+	void __iomem		*lock_reg;
+	void __iomem		*con_reg;
+	enum samsung_pll_type	type;
+	unsigned int		rate_count;
+	const struct samsung_pll_rate_table *rate_table;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct samsung_clk_pll, hw)
+
+static const struct samsung_pll_rate_table *samsung_get_pll_settings(
+				struct samsung_clk_pll *pll, unsigned long rate)
+{
+	const struct samsung_pll_rate_table *rate_table = pll->rate_table;
+	int i;
+
+	for (i = 0; i < pll->rate_count; i++) {
+		if (rate == rate_table[i].rate)
+			return &rate_table[i];
+	}
+
+	return NULL;
+}
+
+static long samsung_pll_round_rate(struct clk_hw *hw,
+			unsigned long drate, unsigned long *prate)
+{
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	const struct samsung_pll_rate_table *rate_table = pll->rate_table;
+	int i;
+
+	/* Assuming rate_table is in descending order */
+	for (i = 0; i < pll->rate_count; i++) {
+		if (drate >= rate_table[i].rate)
+			return rate_table[i].rate;
+	}
+
+	/* return minimum supported value */
+	return rate_table[i - 1].rate;
+}
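+
+/*
+ * A requested rate is thus rounded down to the nearest supported entry
+ * (e.g. 100 MHz against exynos5250's { 266 MHz, 70.5 MHz } vpll table
+ * yields 70.5 MHz), while requests below the smallest entry return it.
+ */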
+
 /*
  * PLL35xx Clock Type
  */
+/* Maximum lock time can be 270 * PDIV cycles */
+#define PLL35XX_LOCK_FACTOR	(270)
 
 #define PLL35XX_MDIV_MASK       (0x3FF)
 #define PLL35XX_PDIV_MASK       (0x3F)
 #define PLL35XX_SDIV_MASK       (0x7)
+#define PLL35XX_LOCK_STAT_MASK	(0x1)
 #define PLL35XX_MDIV_SHIFT      (16)
 #define PLL35XX_PDIV_SHIFT      (8)
 #define PLL35XX_SDIV_SHIFT      (0)
-
-struct samsung_clk_pll35xx {
-	struct clk_hw		hw;
-	const void __iomem	*con_reg;
-};
-
-#define to_clk_pll35xx(_hw) container_of(_hw, struct samsung_clk_pll35xx, hw)
+#define PLL35XX_LOCK_STAT_SHIFT	(29)
 
 static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
 				unsigned long parent_rate)
 {
-	struct samsung_clk_pll35xx *pll = to_clk_pll35xx(hw);
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
 	u32 mdiv, pdiv, sdiv, pll_con;
 	u64 fvco = parent_rate;
 
@@ -49,48 +91,80 @@
 	return (unsigned long)fvco;
 }
 
+static inline bool samsung_pll35xx_mp_change(
+		const struct samsung_pll_rate_table *rate, u32 pll_con)
+{
+	u32 old_mdiv, old_pdiv;
+
+	old_mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
+	old_pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;
+
+	return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv);
+}
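+
+/*
+ * SDIV is a post-divider on the VCO output, so an S-only change does not
+ * disturb the VCO; the set_rate callbacks skip the lock-time setup and
+ * the lock wait in that case.
+ */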
+
+static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
+					unsigned long prate)
+{
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	const struct samsung_pll_rate_table *rate;
+	u32 tmp;
+
+	/* Get required rate settings from table */
+	rate = samsung_get_pll_settings(pll, drate);
+	if (!rate) {
+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+			drate, __clk_get_name(hw->clk));
+		return -EINVAL;
+	}
+
+	tmp = __raw_readl(pll->con_reg);
+
+	if (!(samsung_pll35xx_mp_change(rate, tmp))) {
+		/* If only the s value changed, update just the s divider */
+		tmp &= ~(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT);
+		tmp |= rate->sdiv << PLL35XX_SDIV_SHIFT;
+		__raw_writel(tmp, pll->con_reg);
+
+		return 0;
+	}
+
+	/* Set PLL lock time. */
+	__raw_writel(rate->pdiv * PLL35XX_LOCK_FACTOR,
+			pll->lock_reg);
+
+	/* Change PLL PMS values */
+	tmp &= ~((PLL35XX_MDIV_MASK << PLL35XX_MDIV_SHIFT) |
+			(PLL35XX_PDIV_MASK << PLL35XX_PDIV_SHIFT) |
+			(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT));
+	tmp |= (rate->mdiv << PLL35XX_MDIV_SHIFT) |
+			(rate->pdiv << PLL35XX_PDIV_SHIFT) |
+			(rate->sdiv << PLL35XX_SDIV_SHIFT);
+	__raw_writel(tmp, pll->con_reg);
+
+	/* wait_lock_time */
+	do {
+		cpu_relax();
+		tmp = __raw_readl(pll->con_reg);
+	} while (!(tmp & (PLL35XX_LOCK_STAT_MASK
+				<< PLL35XX_LOCK_STAT_SHIFT)));
+	return 0;
+}
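+
+/*
+ * Note: unlike the 45xx/46xx set_rate callbacks further down, the 35xx
+ * and 36xx lock waits poll the lock status bit without the
+ * PLL_TIMEOUT_MS bailout.
+ */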
+
 static const struct clk_ops samsung_pll35xx_clk_ops = {
 	.recalc_rate = samsung_pll35xx_recalc_rate,
+	.round_rate = samsung_pll_round_rate,
+	.set_rate = samsung_pll35xx_set_rate,
 };
 
-struct clk * __init samsung_clk_register_pll35xx(const char *name,
-			const char *pname, const void __iomem *con_reg)
-{
-	struct samsung_clk_pll35xx *pll;
-	struct clk *clk;
-	struct clk_init_data init;
-
-	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-	if (!pll) {
-		pr_err("%s: could not allocate pll clk %s\n", __func__, name);
-		return NULL;
-	}
-
-	init.name = name;
-	init.ops = &samsung_pll35xx_clk_ops;
-	init.flags = CLK_GET_RATE_NOCACHE;
-	init.parent_names = &pname;
-	init.num_parents = 1;
-
-	pll->hw.init = &init;
-	pll->con_reg = con_reg;
-
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: failed to register pll clock %s\n", __func__,
-				name);
-		kfree(pll);
-	}
-
-	if (clk_register_clkdev(clk, name, NULL))
-		pr_err("%s: failed to register lookup for %s", __func__, name);
-
-	return clk;
-}
+static const struct clk_ops samsung_pll35xx_clk_min_ops = {
+	.recalc_rate = samsung_pll35xx_recalc_rate,
+};
 
 /*
  * PLL36xx Clock Type
  */
+/* Maximum lock time can be 3000 * PDIV cycles */
+#define PLL36XX_LOCK_FACTOR    (3000)
 
 #define PLL36XX_KDIV_MASK	(0xFFFF)
 #define PLL36XX_MDIV_MASK	(0x1FF)
@@ -99,18 +173,13 @@
 #define PLL36XX_MDIV_SHIFT	(16)
 #define PLL36XX_PDIV_SHIFT	(8)
 #define PLL36XX_SDIV_SHIFT	(0)
-
-struct samsung_clk_pll36xx {
-	struct clk_hw		hw;
-	const void __iomem	*con_reg;
-};
-
-#define to_clk_pll36xx(_hw) container_of(_hw, struct samsung_clk_pll36xx, hw)
+#define PLL36XX_KDIV_SHIFT	(0)
+#define PLL36XX_LOCK_STAT_SHIFT	(29)
 
 static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 				unsigned long parent_rate)
 {
-	struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
 	u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
 	s16 kdiv;
 	u64 fvco = parent_rate;
@@ -129,68 +198,102 @@
 	return (unsigned long)fvco;
 }
 
+static inline bool samsung_pll36xx_mpk_change(
+	const struct samsung_pll_rate_table *rate, u32 pll_con0, u32 pll_con1)
+{
+	u32 old_mdiv, old_pdiv, old_kdiv;
+
+	old_mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
+	old_pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
+	old_kdiv = (pll_con1 >> PLL36XX_KDIV_SHIFT) & PLL36XX_KDIV_MASK;
+
+	return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+		rate->kdiv != old_kdiv);
+}
+
+static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
+					unsigned long parent_rate)
+{
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	u32 tmp, pll_con0, pll_con1;
+	const struct samsung_pll_rate_table *rate;
+
+	rate = samsung_get_pll_settings(pll, drate);
+	if (!rate) {
+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+			drate, __clk_get_name(hw->clk));
+		return -EINVAL;
+	}
+
+	pll_con0 = __raw_readl(pll->con_reg);
+	pll_con1 = __raw_readl(pll->con_reg + 4);
+
+	if (!(samsung_pll36xx_mpk_change(rate, pll_con0, pll_con1))) {
+		/* If only the s value changed, update just the s divider */
+		pll_con0 &= ~(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT);
+		pll_con0 |= (rate->sdiv << PLL36XX_SDIV_SHIFT);
+		__raw_writel(pll_con0, pll->con_reg);
+
+		return 0;
+	}
+
+	/* Set PLL lock time. */
+	__raw_writel(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg);
+
+	/* Change PLL PMS values */
+	pll_con0 &= ~((PLL36XX_MDIV_MASK << PLL36XX_MDIV_SHIFT) |
+			(PLL36XX_PDIV_MASK << PLL36XX_PDIV_SHIFT) |
+			(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT));
+	pll_con0 |= (rate->mdiv << PLL36XX_MDIV_SHIFT) |
+			(rate->pdiv << PLL36XX_PDIV_SHIFT) |
+			(rate->sdiv << PLL36XX_SDIV_SHIFT);
+	__raw_writel(pll_con0, pll->con_reg);
+
+	pll_con1 &= ~(PLL36XX_KDIV_MASK << PLL36XX_KDIV_SHIFT);
+	pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
+	__raw_writel(pll_con1, pll->con_reg + 4);
+
+	/* wait_lock_time */
+	do {
+		cpu_relax();
+		tmp = __raw_readl(pll->con_reg);
+	} while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT)));
+
+	return 0;
+}
+
 static const struct clk_ops samsung_pll36xx_clk_ops = {
 	.recalc_rate = samsung_pll36xx_recalc_rate,
+	.set_rate = samsung_pll36xx_set_rate,
+	.round_rate = samsung_pll_round_rate,
 };
 
-struct clk * __init samsung_clk_register_pll36xx(const char *name,
-			const char *pname, const void __iomem *con_reg)
-{
-	struct samsung_clk_pll36xx *pll;
-	struct clk *clk;
-	struct clk_init_data init;
-
-	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-	if (!pll) {
-		pr_err("%s: could not allocate pll clk %s\n", __func__, name);
-		return NULL;
-	}
-
-	init.name = name;
-	init.ops = &samsung_pll36xx_clk_ops;
-	init.flags = CLK_GET_RATE_NOCACHE;
-	init.parent_names = &pname;
-	init.num_parents = 1;
-
-	pll->hw.init = &init;
-	pll->con_reg = con_reg;
-
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: failed to register pll clock %s\n", __func__,
-				name);
-		kfree(pll);
-	}
-
-	if (clk_register_clkdev(clk, name, NULL))
-		pr_err("%s: failed to register lookup for %s", __func__, name);
-
-	return clk;
-}
+static const struct clk_ops samsung_pll36xx_clk_min_ops = {
+	.recalc_rate = samsung_pll36xx_recalc_rate,
+};
 
 /*
  * PLL45xx Clock Type
  */
+#define PLL4502_LOCK_FACTOR	400
+#define PLL4508_LOCK_FACTOR	240
 
 #define PLL45XX_MDIV_MASK	(0x3FF)
 #define PLL45XX_PDIV_MASK	(0x3F)
 #define PLL45XX_SDIV_MASK	(0x7)
+#define PLL45XX_AFC_MASK	(0x1F)
 #define PLL45XX_MDIV_SHIFT	(16)
 #define PLL45XX_PDIV_SHIFT	(8)
 #define PLL45XX_SDIV_SHIFT	(0)
+#define PLL45XX_AFC_SHIFT	(0)
 
-struct samsung_clk_pll45xx {
-	struct clk_hw		hw;
-	enum pll45xx_type	type;
-	const void __iomem	*con_reg;
-};
-
-#define to_clk_pll45xx(_hw) container_of(_hw, struct samsung_clk_pll45xx, hw)
+#define PLL45XX_ENABLE		BIT(31)
+#define PLL45XX_LOCKED		BIT(29)
 
 static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
 				unsigned long parent_rate)
 {
-	struct samsung_clk_pll45xx *pll = to_clk_pll45xx(hw);
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
 	u32 mdiv, pdiv, sdiv, pll_con;
 	u64 fvco = parent_rate;
 
@@ -208,54 +311,113 @@
 	return (unsigned long)fvco;
 }
 
+static bool samsung_pll45xx_mp_change(u32 pll_con0, u32 pll_con1,
+				const struct samsung_pll_rate_table *rate)
+{
+	u32 old_mdiv, old_pdiv, old_afc;
+
+	old_mdiv = (pll_con0 >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK;
+	old_pdiv = (pll_con0 >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK;
+	old_afc = (pll_con1 >> PLL45XX_AFC_SHIFT) & PLL45XX_AFC_MASK;
+
+	return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv
+		|| old_afc != rate->afc);
+}
+
+static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
+					unsigned long prate)
+{
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	const struct samsung_pll_rate_table *rate;
+	u32 con0, con1;
+	ktime_t start;
+
+	/* Get required rate settings from table */
+	rate = samsung_get_pll_settings(pll, drate);
+	if (!rate) {
+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+			drate, __clk_get_name(hw->clk));
+		return -EINVAL;
+	}
+
+	con0 = __raw_readl(pll->con_reg);
+	con1 = __raw_readl(pll->con_reg + 0x4);
+
+	if (!(samsung_pll45xx_mp_change(con0, con1, rate))) {
+		/* If only the s value changed, update just the s divider */
+		con0 &= ~(PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT);
+		con0 |= rate->sdiv << PLL45XX_SDIV_SHIFT;
+		__raw_writel(con0, pll->con_reg);
+
+		return 0;
+	}
+
+	/* Set PLL PMS values. */
+	con0 &= ~((PLL45XX_MDIV_MASK << PLL45XX_MDIV_SHIFT) |
+			(PLL45XX_PDIV_MASK << PLL45XX_PDIV_SHIFT) |
+			(PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT));
+	con0 |= (rate->mdiv << PLL45XX_MDIV_SHIFT) |
+			(rate->pdiv << PLL45XX_PDIV_SHIFT) |
+			(rate->sdiv << PLL45XX_SDIV_SHIFT);
+
+	/* Set PLL AFC value. */
+	con1 = __raw_readl(pll->con_reg + 0x4);
+	con1 &= ~(PLL45XX_AFC_MASK << PLL45XX_AFC_SHIFT);
+	con1 |= (rate->afc << PLL45XX_AFC_SHIFT);
+
+	/* Set PLL lock time. */
+	switch (pll->type) {
+	case pll_4502:
+		__raw_writel(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg);
+		break;
+	case pll_4508:
+		__raw_writel(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Set new configuration. */
+	__raw_writel(con1, pll->con_reg + 0x4);
+	__raw_writel(con0, pll->con_reg);
+
+	/* Wait for locking. */
+	start = ktime_get();
+	while (!(__raw_readl(pll->con_reg) & PLL45XX_LOCKED)) {
+		ktime_t delta = ktime_sub(ktime_get(), start);
+
+		if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
+			pr_err("%s: could not lock PLL %s\n",
+					__func__, __clk_get_name(hw->clk));
+			return -EFAULT;
+		}
+
+		cpu_relax();
+	}
+
+	return 0;
+}
+
 static const struct clk_ops samsung_pll45xx_clk_ops = {
 	.recalc_rate = samsung_pll45xx_recalc_rate,
+	.round_rate = samsung_pll_round_rate,
+	.set_rate = samsung_pll45xx_set_rate,
 };
 
-struct clk * __init samsung_clk_register_pll45xx(const char *name,
-			const char *pname, const void __iomem *con_reg,
-			enum pll45xx_type type)
-{
-	struct samsung_clk_pll45xx *pll;
-	struct clk *clk;
-	struct clk_init_data init;
-
-	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-	if (!pll) {
-		pr_err("%s: could not allocate pll clk %s\n", __func__, name);
-		return NULL;
-	}
-
-	init.name = name;
-	init.ops = &samsung_pll45xx_clk_ops;
-	init.flags = CLK_GET_RATE_NOCACHE;
-	init.parent_names = &pname;
-	init.num_parents = 1;
-
-	pll->hw.init = &init;
-	pll->con_reg = con_reg;
-	pll->type = type;
-
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: failed to register pll clock %s\n", __func__,
-				name);
-		kfree(pll);
-	}
-
-	if (clk_register_clkdev(clk, name, NULL))
-		pr_err("%s: failed to register lookup for %s", __func__, name);
-
-	return clk;
-}
+static const struct clk_ops samsung_pll45xx_clk_min_ops = {
+	.recalc_rate = samsung_pll45xx_recalc_rate,
+};
 
 /*
  * PLL46xx Clock Type
  */
+#define PLL46XX_LOCK_FACTOR	3000
 
+#define PLL46XX_VSEL_MASK	(1)
 #define PLL46XX_MDIV_MASK	(0x1FF)
 #define PLL46XX_PDIV_MASK	(0x3F)
 #define PLL46XX_SDIV_MASK	(0x7)
+#define PLL46XX_VSEL_SHIFT	(27)
 #define PLL46XX_MDIV_SHIFT	(16)
 #define PLL46XX_PDIV_SHIFT	(8)
 #define PLL46XX_SDIV_SHIFT	(0)
@@ -263,19 +425,20 @@
 #define PLL46XX_KDIV_MASK	(0xFFFF)
 #define PLL4650C_KDIV_MASK	(0xFFF)
 #define PLL46XX_KDIV_SHIFT	(0)
+#define PLL46XX_MFR_MASK	(0x3F)
+#define PLL46XX_MRR_MASK	(0x1F)
+#define PLL46XX_MFR_SHIFT	(16)
+#define PLL46XX_MRR_SHIFT	(24)
 
-struct samsung_clk_pll46xx {
-	struct clk_hw		hw;
-	enum pll46xx_type	type;
-	const void __iomem	*con_reg;
-};
-
-#define to_clk_pll46xx(_hw) container_of(_hw, struct samsung_clk_pll46xx, hw)
+#define PLL46XX_ENABLE		BIT(31)
+#define PLL46XX_LOCKED		BIT(29)
+#define PLL46XX_VSEL		BIT(27)
 
 static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
 				unsigned long parent_rate)
 {
-	struct samsung_clk_pll46xx *pll = to_clk_pll46xx(hw);
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
 	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift;
 	u64 fvco = parent_rate;
 
@@ -295,47 +458,175 @@
 	return (unsigned long)fvco;
 }
 
+static bool samsung_pll46xx_mpk_change(u32 pll_con0, u32 pll_con1,
+				const struct samsung_pll_rate_table *rate)
+{
+	u32 old_mdiv, old_pdiv, old_kdiv;
+
+	old_mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
+	old_pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
+	old_kdiv = (pll_con1 >> PLL46XX_KDIV_SHIFT) & PLL46XX_KDIV_MASK;
+
+	return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv
+		|| old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
+					unsigned long prate)
+{
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	const struct samsung_pll_rate_table *rate;
+	u32 con0, con1, lock;
+	ktime_t start;
+
+	/* Get required rate settings from table */
+	rate = samsung_get_pll_settings(pll, drate);
+	if (!rate) {
+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+			drate, __clk_get_name(hw->clk));
+		return -EINVAL;
+	}
+
+	con0 = __raw_readl(pll->con_reg);
+	con1 = __raw_readl(pll->con_reg + 0x4);
+
+	if (!(samsung_pll46xx_mpk_change(con0, con1, rate))) {
+		/* If only the s value changed, update just the s divider */
+		con0 &= ~(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
+		con0 |= rate->sdiv << PLL46XX_SDIV_SHIFT;
+		__raw_writel(con0, pll->con_reg);
+
+		return 0;
+	}
+
+	/* Set PLL lock time. */
+	lock = rate->pdiv * PLL46XX_LOCK_FACTOR;
+	if (lock > 0xffff)
+		/* Maximum lock time bitfield is 16-bit. */
+		lock = 0xffff;
+
+	/* Set PLL PMS and VSEL values. */
+	con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+			(PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
+			(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
+			(PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
+	con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
+			(rate->pdiv << PLL46XX_PDIV_SHIFT) |
+			(rate->sdiv << PLL46XX_SDIV_SHIFT) |
+			(rate->vsel << PLL46XX_VSEL_SHIFT);
+
+	/* Set PLL K, MFR and MRR values. */
+	con1 = __raw_readl(pll->con_reg + 0x4);
+	con1 &= ~((PLL46XX_KDIV_MASK << PLL46XX_KDIV_SHIFT) |
+			(PLL46XX_MFR_MASK << PLL46XX_MFR_SHIFT) |
+			(PLL46XX_MRR_MASK << PLL46XX_MRR_SHIFT));
+	con1 |= (rate->kdiv << PLL46XX_KDIV_SHIFT) |
+			(rate->mfr << PLL46XX_MFR_SHIFT) |
+			(rate->mrr << PLL46XX_MRR_SHIFT);
+
+	/* Write configuration to PLL */
+	__raw_writel(lock, pll->lock_reg);
+	__raw_writel(con0, pll->con_reg);
+	__raw_writel(con1, pll->con_reg + 0x4);
+
+	/* Wait for locking. */
+	start = ktime_get();
+	while (!(__raw_readl(pll->con_reg) & PLL46XX_LOCKED)) {
+		ktime_t delta = ktime_sub(ktime_get(), start);
+
+		if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
+			pr_err("%s: could not lock PLL %s\n",
+					__func__, __clk_get_name(hw->clk));
+			return -EFAULT;
+		}
+
+		cpu_relax();
+	}
+
+	return 0;
+}
+
 static const struct clk_ops samsung_pll46xx_clk_ops = {
 	.recalc_rate = samsung_pll46xx_recalc_rate,
+	.round_rate = samsung_pll_round_rate,
+	.set_rate = samsung_pll46xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll46xx_clk_min_ops = {
+	.recalc_rate = samsung_pll46xx_recalc_rate,
 };
 
-struct clk * __init samsung_clk_register_pll46xx(const char *name,
-			const char *pname, const void __iomem *con_reg,
-			enum pll46xx_type type)
+/*
+ * PLL6552 Clock Type
+ */
+
+#define PLL6552_MDIV_MASK	0x3ff
+#define PLL6552_PDIV_MASK	0x3f
+#define PLL6552_SDIV_MASK	0x7
+#define PLL6552_MDIV_SHIFT	16
+#define PLL6552_PDIV_SHIFT	8
+#define PLL6552_SDIV_SHIFT	0
+
+static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
 {
-	struct samsung_clk_pll46xx *pll;
-	struct clk *clk;
-	struct clk_init_data init;
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	u32 mdiv, pdiv, sdiv, pll_con;
+	u64 fvco = parent_rate;
 
-	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-	if (!pll) {
-		pr_err("%s: could not allocate pll clk %s\n", __func__, name);
-		return NULL;
-	}
+	pll_con = __raw_readl(pll->con_reg);
+	mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK;
+	pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK;
+	sdiv = (pll_con >> PLL6552_SDIV_SHIFT) & PLL6552_SDIV_MASK;
 
-	init.name = name;
-	init.ops = &samsung_pll46xx_clk_ops;
-	init.flags = CLK_GET_RATE_NOCACHE;
-	init.parent_names = &pname;
-	init.num_parents = 1;
+	fvco *= mdiv;
+	do_div(fvco, (pdiv << sdiv));
 
-	pll->hw.init = &init;
-	pll->con_reg = con_reg;
-	pll->type = type;
-
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: failed to register pll clock %s\n", __func__,
-				name);
-		kfree(pll);
-	}
-
-	if (clk_register_clkdev(clk, name, NULL))
-		pr_err("%s: failed to register lookup for %s", __func__, name);
-
-	return clk;
+	return (unsigned long)fvco;
 }
 
+static const struct clk_ops samsung_pll6552_clk_ops = {
+	.recalc_rate = samsung_pll6552_recalc_rate,
+};
+
+/*
+ * PLL6553 Clock Type
+ */
+
+#define PLL6553_MDIV_MASK	0xff
+#define PLL6553_PDIV_MASK	0x3f
+#define PLL6553_SDIV_MASK	0x7
+#define PLL6553_KDIV_MASK	0xffff
+#define PLL6553_MDIV_SHIFT	16
+#define PLL6553_PDIV_SHIFT	8
+#define PLL6553_SDIV_SHIFT	0
+#define PLL6553_KDIV_SHIFT	0
+
+static unsigned long samsung_pll6553_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct samsung_clk_pll *pll = to_clk_pll(hw);
+	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+	u64 fvco = parent_rate;
+
+	pll_con0 = __raw_readl(pll->con_reg);
+	pll_con1 = __raw_readl(pll->con_reg + 0x4);
+	mdiv = (pll_con0 >> PLL6553_MDIV_SHIFT) & PLL6553_MDIV_MASK;
+	pdiv = (pll_con0 >> PLL6553_PDIV_SHIFT) & PLL6553_PDIV_MASK;
+	sdiv = (pll_con0 >> PLL6553_SDIV_SHIFT) & PLL6553_SDIV_MASK;
+	kdiv = (pll_con1 >> PLL6553_KDIV_SHIFT) & PLL6553_KDIV_MASK;
+
+	fvco *= (mdiv << 16) + kdiv;
+	do_div(fvco, (pdiv << sdiv));
+	fvco >>= 16;
+
+	return (unsigned long)fvco;
+}
+
+static const struct clk_ops samsung_pll6553_clk_ops = {
+	.recalc_rate = samsung_pll6553_recalc_rate,
+};
+
 /*
  * PLL2550x Clock Type
  */
@@ -418,3 +709,117 @@
 
 	return clk;
 }
+
+static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
+						void __iomem *base)
+{
+	struct samsung_clk_pll *pll;
+	struct clk *clk;
+	struct clk_init_data init;
+	int ret, len;
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll) {
+		pr_err("%s: could not allocate pll clk %s\n",
+			__func__, pll_clk->name);
+		return;
+	}
+
+	init.name = pll_clk->name;
+	init.flags = pll_clk->flags;
+	init.parent_names = &pll_clk->parent_name;
+	init.num_parents = 1;
+
+	if (pll_clk->rate_table) {
+		/* find count of rates in rate_table */
+		for (len = 0; pll_clk->rate_table[len].rate != 0; )
+			len++;
+
+		pll->rate_count = len;
+		pll->rate_table = kmemdup(pll_clk->rate_table,
+					pll->rate_count *
+					sizeof(struct samsung_pll_rate_table),
+					GFP_KERNEL);
+		WARN(!pll->rate_table,
+			"%s: could not allocate rate table for %s\n",
+			__func__, pll_clk->name);
+	}
+
+	switch (pll_clk->type) {
+	/* clk_ops for 35xx and 2550 are similar */
+	case pll_35xx:
+	case pll_2550:
+		if (!pll->rate_table)
+			init.ops = &samsung_pll35xx_clk_min_ops;
+		else
+			init.ops = &samsung_pll35xx_clk_ops;
+		break;
+	case pll_4500:
+		init.ops = &samsung_pll45xx_clk_min_ops;
+		break;
+	case pll_4502:
+	case pll_4508:
+		if (!pll->rate_table)
+			init.ops = &samsung_pll45xx_clk_min_ops;
+		else
+			init.ops = &samsung_pll45xx_clk_ops;
+		break;
+	/* clk_ops for 36xx and 2650 are similar */
+	case pll_36xx:
+	case pll_2650:
+		if (!pll->rate_table)
+			init.ops = &samsung_pll36xx_clk_min_ops;
+		else
+			init.ops = &samsung_pll36xx_clk_ops;
+		break;
+	case pll_6552:
+		init.ops = &samsung_pll6552_clk_ops;
+		break;
+	case pll_6553:
+		init.ops = &samsung_pll6553_clk_ops;
+		break;
+	case pll_4600:
+	case pll_4650:
+	case pll_4650c:
+		if (!pll->rate_table)
+			init.ops = &samsung_pll46xx_clk_min_ops;
+		else
+			init.ops = &samsung_pll46xx_clk_ops;
+		break;
+	default:
+		pr_warn("%s: Unknown pll type for pll clk %s\n",
+			__func__, pll_clk->name);
+	}
+
+	pll->hw.init = &init;
+	pll->type = pll_clk->type;
+	pll->lock_reg = base + pll_clk->lock_offset;
+	pll->con_reg = base + pll_clk->con_offset;
+
+	clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(clk)) {
+		pr_err("%s: failed to register pll clock %s : %ld\n",
+			__func__, pll_clk->name, PTR_ERR(clk));
+		kfree(pll);
+		return;
+	}
+
+	samsung_clk_add_lookup(clk, pll_clk->id);
+
+	if (!pll_clk->alias)
+		return;
+
+	ret = clk_register_clkdev(clk, pll_clk->alias, pll_clk->dev_name);
+	if (ret)
+		pr_err("%s: failed to register lookup for %s : %d",
+			__func__, pll_clk->name, ret);
+}
+
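+/*
+ * Registers an array of PLLs against an already-mapped controller base;
+ * the SoC init paths above call this as, e.g.,
+ * samsung_clk_register_pll(exynos5250_plls, ARRAY_SIZE(exynos5250_plls),
+ * reg_base).
+ */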
+void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
+				unsigned int nr_pll, void __iomem *base)
+{
+	int cnt;
+
+	for (cnt = 0; cnt < nr_pll; cnt++)
+		_samsung_clk_register_pll(&pll_list[cnt], base);
+}
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index f33786e..6c39030 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -12,28 +12,83 @@
 #ifndef __SAMSUNG_CLK_PLL_H
 #define __SAMSUNG_CLK_PLL_H
 
-enum pll45xx_type {
+enum samsung_pll_type {
+	pll_35xx,
+	pll_36xx,
+	pll_2550,
+	pll_2650,
 	pll_4500,
 	pll_4502,
-	pll_4508
-};
-
-enum pll46xx_type {
+	pll_4508,
 	pll_4600,
 	pll_4650,
 	pll_4650c,
+	pll_6552,
+	pll_6553,
 };
 
-extern struct clk * __init samsung_clk_register_pll35xx(const char *name,
-			const char *pname, const void __iomem *con_reg);
-extern struct clk * __init samsung_clk_register_pll36xx(const char *name,
-			const char *pname, const void __iomem *con_reg);
-extern struct clk * __init samsung_clk_register_pll45xx(const char *name,
-			const char *pname, const void __iomem *con_reg,
-			enum pll45xx_type type);
-extern struct clk * __init samsung_clk_register_pll46xx(const char *name,
-			const char *pname, const void __iomem *con_reg,
-			enum pll46xx_type type);
+#define PLL_35XX_RATE(_rate, _m, _p, _s)			\
+	{							\
+		.rate	=	(_rate),				\
+		.mdiv	=	(_m),				\
+		.pdiv	=	(_p),				\
+		.sdiv	=	(_s),				\
+	}
+
+#define PLL_36XX_RATE(_rate, _m, _p, _s, _k)			\
+	{							\
+		.rate	=	(_rate),				\
+		.mdiv	=	(_m),				\
+		.pdiv	=	(_p),				\
+		.sdiv	=	(_s),				\
+		.kdiv	=	(_k),				\
+	}
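+
+/*
+ * For these P/M/S(/K) parameters the resulting output frequency is
+ *   FOUT = (MDIV + KDIV / 2^16) * FIN / (PDIV * 2^SDIV)
+ * (the KDIV term applies only to the fractional 36xx-style PLLs). E.g.,
+ * with a 24 MHz fin_pll, PLL_36XX_RATE(192000000, 48, 3, 1, 0) above in
+ * clk-exynos4.c gives 48 * 24 MHz / (3 * 2^1) = 192 MHz.
+ */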
+
+#define PLL_45XX_RATE(_rate, _m, _p, _s, _afc)			\
+	{							\
+		.rate	=	(_rate),			\
+		.mdiv	=	(_m),				\
+		.pdiv	=	(_p),				\
+		.sdiv	=	(_s),				\
+		.afc	=	(_afc),				\
+	}
+
+#define PLL_4600_RATE(_rate, _m, _p, _s, _k, _vsel)		\
+	{							\
+		.rate	=	(_rate),			\
+		.mdiv	=	(_m),				\
+		.pdiv	=	(_p),				\
+		.sdiv	=	(_s),				\
+		.kdiv	=	(_k),				\
+		.vsel	=	(_vsel),			\
+	}
+
+#define PLL_4650_RATE(_rate, _m, _p, _s, _k, _mfr, _mrr, _vsel)	\
+	{							\
+		.rate	=	(_rate),			\
+		.mdiv	=	(_m),				\
+		.pdiv	=	(_p),				\
+		.sdiv	=	(_s),				\
+		.kdiv	=	(_k),				\
+		.mfr	=	(_mfr),				\
+		.mrr	=	(_mrr),				\
+		.vsel	=	(_vsel),			\
+	}
+
+/* NOTE: Rate table should be kept sorted in descending order. */
+
+struct samsung_pll_rate_table {
+	unsigned int rate;
+	unsigned int pdiv;
+	unsigned int mdiv;
+	unsigned int sdiv;
+	unsigned int kdiv;
+	unsigned int afc;
+	unsigned int mfr;
+	unsigned int mrr;
+	unsigned int vsel;
+};
+
 extern struct clk * __init samsung_clk_register_pll2550x(const char *name,
 			const char *pname, const void __iomem *reg_base,
 			const unsigned long offset);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
new file mode 100644
index 0000000..7d2c842
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all S3C64xx SoCs.
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+/* S3C64xx clock controller register offsets. */
+#define APLL_LOCK		0x000
+#define MPLL_LOCK		0x004
+#define EPLL_LOCK		0x008
+#define APLL_CON		0x00c
+#define MPLL_CON		0x010
+#define EPLL_CON0		0x014
+#define EPLL_CON1		0x018
+#define CLK_SRC			0x01c
+#define CLK_DIV0		0x020
+#define CLK_DIV1		0x024
+#define CLK_DIV2		0x028
+#define HCLK_GATE		0x030
+#define PCLK_GATE		0x034
+#define SCLK_GATE		0x038
+#define MEM0_GATE		0x03c
+#define CLK_SRC2		0x10c
+#define OTHERS			0x900
+
+/* Helper macros to define clock arrays. */
+#define FIXED_RATE_CLOCKS(name)	\
+		static struct samsung_fixed_rate_clock name[]
+#define MUX_CLOCKS(name)	\
+		static struct samsung_mux_clock name[]
+#define DIV_CLOCKS(name)	\
+		static struct samsung_div_clock name[]
+#define GATE_CLOCKS(name)	\
+		static struct samsung_gate_clock name[]
+
+/* Helper macros for gate types present on S3C64xx. */
+#define GATE_BUS(_id, cname, pname, o, b) \
+		GATE(_id, cname, pname, o, b, 0, 0)
+#define GATE_SCLK(_id, cname, pname, o, b) \
+		GATE(_id, cname, pname, o, b, CLK_SET_RATE_PARENT, 0)
+#define GATE_ON(_id, cname, pname, o, b) \
+		GATE(_id, cname, pname, o, b, CLK_IGNORE_UNUSED, 0)
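+
+/*
+ * E.g. GATE_BUS(HCLK_UHOST, "hclk_uhost", "hclk", HCLK_GATE, 29) below
+ * expands to GATE(HCLK_UHOST, "hclk_uhost", "hclk", HCLK_GATE, 29, 0, 0).
+ */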
+
+/* list of PLLs to be registered */
+enum s3c64xx_plls {
+	apll, mpll, epll,
+};
+
+/*
+ * List of controller registers to be saved and restored during
+ * a suspend/resume cycle.
+ */
+static unsigned long s3c64xx_clk_regs[] __initdata = {
+	APLL_LOCK,
+	MPLL_LOCK,
+	EPLL_LOCK,
+	APLL_CON,
+	MPLL_CON,
+	EPLL_CON0,
+	EPLL_CON1,
+	CLK_SRC,
+	CLK_DIV0,
+	CLK_DIV1,
+	CLK_DIV2,
+	HCLK_GATE,
+	PCLK_GATE,
+	SCLK_GATE,
+};
+
+static unsigned long s3c6410_clk_regs[] __initdata = {
+	CLK_SRC2,
+	MEM0_GATE,
+};
+
+/* List of parent clocks common for all S3C64xx SoCs. */
+PNAME(spi_mmc_p)	= { "mout_epll", "dout_mpll", "fin_pll", "clk27m" };
+PNAME(uart_p)		= { "mout_epll", "dout_mpll" };
+PNAME(audio0_p)		= { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk0",
+				"pcmcdclk0", "none", "none", "none" };
+PNAME(audio1_p)		= { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk1",
+				"pcmcdclk0", "none", "none", "none" };
+PNAME(mfc_p)		= { "hclkx2", "mout_epll" };
+PNAME(apll_p)		= { "fin_pll", "fout_apll" };
+PNAME(mpll_p)		= { "fin_pll", "fout_mpll" };
+PNAME(epll_p)		= { "fin_pll", "fout_epll" };
+PNAME(hclkx2_p)		= { "mout_mpll", "mout_apll" };
+
+/* S3C6400-specific parent clocks. */
+PNAME(scaler_lcd_p6400)	= { "mout_epll", "dout_mpll", "none", "none" };
+PNAME(irda_p6400)	= { "mout_epll", "dout_mpll", "none", "clk48m" };
+PNAME(uhost_p6400)	= { "clk48m", "mout_epll", "dout_mpll", "none" };
+
+/* S3C6410-specific parent clocks. */
+PNAME(clk27_p6410)	= { "clk27m", "fin_pll" };
+PNAME(scaler_lcd_p6410)	= { "mout_epll", "dout_mpll", "fin_pll", "none" };
+PNAME(irda_p6410)	= { "mout_epll", "dout_mpll", "fin_pll", "clk48m" };
+PNAME(uhost_p6410)	= { "clk48m", "mout_epll", "dout_mpll", "fin_pll" };
+PNAME(audio2_p6410)	= { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk2",
+				"pcmcdclk1", "none", "none", "none" };
+
+/* Fixed rate clocks generated outside the SoC. */
+FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_ext_clks) __initdata = {
+	FRATE(0, "fin_pll", NULL, CLK_IS_ROOT, 0),
+	FRATE(0, "xusbxti", NULL, CLK_IS_ROOT, 0),
+};
+
+/* Fixed rate clocks generated inside the SoC. */
+FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_clks) __initdata = {
+	FRATE(CLK27M, "clk27m", NULL, CLK_IS_ROOT, 27000000),
+	FRATE(CLK48M, "clk48m", NULL, CLK_IS_ROOT, 48000000),
+};
+
+/* List of clock muxes present on all S3C64xx SoCs. */
+MUX_CLOCKS(s3c64xx_mux_clks) __initdata = {
+	MUX_F(0, "mout_syncmux", hclkx2_p, OTHERS, 6, 1, 0, CLK_MUX_READ_ONLY),
+	MUX(MOUT_APLL, "mout_apll", apll_p, CLK_SRC, 0, 1),
+	MUX(MOUT_MPLL, "mout_mpll", mpll_p, CLK_SRC, 1, 1),
+	MUX(MOUT_EPLL, "mout_epll", epll_p, CLK_SRC, 2, 1),
+	MUX(MOUT_MFC, "mout_mfc", mfc_p, CLK_SRC, 4, 1),
+	MUX(MOUT_AUDIO0, "mout_audio0", audio0_p, CLK_SRC, 7, 3),
+	MUX(MOUT_AUDIO1, "mout_audio1", audio1_p, CLK_SRC, 10, 3),
+	MUX(MOUT_UART, "mout_uart", uart_p, CLK_SRC, 13, 1),
+	MUX(MOUT_SPI0, "mout_spi0", spi_mmc_p, CLK_SRC, 14, 2),
+	MUX(MOUT_SPI1, "mout_spi1", spi_mmc_p, CLK_SRC, 16, 2),
+	MUX(MOUT_MMC0, "mout_mmc0", spi_mmc_p, CLK_SRC, 18, 2),
+	MUX(MOUT_MMC1, "mout_mmc1", spi_mmc_p, CLK_SRC, 20, 2),
+	MUX(MOUT_MMC2, "mout_mmc2", spi_mmc_p, CLK_SRC, 22, 2),
+};
+
+/* List of clock muxes present on S3C6400. */
+MUX_CLOCKS(s3c6400_mux_clks) __initdata = {
+	MUX(MOUT_UHOST, "mout_uhost", uhost_p6400, CLK_SRC, 5, 2),
+	MUX(MOUT_IRDA, "mout_irda", irda_p6400, CLK_SRC, 24, 2),
+	MUX(MOUT_LCD, "mout_lcd", scaler_lcd_p6400, CLK_SRC, 26, 2),
+	MUX(MOUT_SCALER, "mout_scaler", scaler_lcd_p6400, CLK_SRC, 28, 2),
+};
+
+/* List of clock muxes present on S3C6410. */
+MUX_CLOCKS(s3c6410_mux_clks) __initdata = {
+	MUX(MOUT_UHOST, "mout_uhost", uhost_p6410, CLK_SRC, 5, 2),
+	MUX(MOUT_IRDA, "mout_irda", irda_p6410, CLK_SRC, 24, 2),
+	MUX(MOUT_LCD, "mout_lcd", scaler_lcd_p6410, CLK_SRC, 26, 2),
+	MUX(MOUT_SCALER, "mout_scaler", scaler_lcd_p6410, CLK_SRC, 28, 2),
+	MUX(MOUT_DAC27, "mout_dac27", clk27_p6410, CLK_SRC, 30, 1),
+	MUX(MOUT_TV27, "mout_tv27", clk27_p6410, CLK_SRC, 31, 1),
+	MUX(MOUT_AUDIO2, "mout_audio2", audio2_p6410, CLK_SRC2, 0, 3),
+};
+
+/* List of clock dividers present on all S3C64xx SoCs. */
+DIV_CLOCKS(s3c64xx_div_clks) __initdata = {
+	DIV(DOUT_MPLL, "dout_mpll", "mout_mpll", CLK_DIV0, 4, 1),
+	DIV(HCLKX2, "hclkx2", "mout_syncmux", CLK_DIV0, 9, 3),
+	DIV(HCLK, "hclk", "hclkx2", CLK_DIV0, 8, 1),
+	DIV(PCLK, "pclk", "hclkx2", CLK_DIV0, 12, 4),
+	DIV(DOUT_SECUR, "dout_secur", "hclkx2", CLK_DIV0, 18, 2),
+	DIV(DOUT_CAM, "dout_cam", "hclkx2", CLK_DIV0, 20, 4),
+	DIV(DOUT_JPEG, "dout_jpeg", "hclkx2", CLK_DIV0, 24, 4),
+	DIV(DOUT_MFC, "dout_mfc", "mout_mfc", CLK_DIV0, 28, 4),
+	DIV(DOUT_MMC0, "dout_mmc0", "mout_mmc0", CLK_DIV1, 0, 4),
+	DIV(DOUT_MMC1, "dout_mmc1", "mout_mmc1", CLK_DIV1, 4, 4),
+	DIV(DOUT_MMC2, "dout_mmc2", "mout_mmc2", CLK_DIV1, 8, 4),
+	DIV(DOUT_LCD, "dout_lcd", "mout_lcd", CLK_DIV1, 12, 4),
+	DIV(DOUT_SCALER, "dout_scaler", "mout_scaler", CLK_DIV1, 16, 4),
+	DIV(DOUT_UHOST, "dout_uhost", "mout_uhost", CLK_DIV1, 20, 4),
+	DIV(DOUT_SPI0, "dout_spi0", "mout_spi0", CLK_DIV2, 0, 4),
+	DIV(DOUT_SPI1, "dout_spi1", "mout_spi1", CLK_DIV2, 4, 4),
+	DIV(DOUT_AUDIO0, "dout_audio0", "mout_audio0", CLK_DIV2, 8, 4),
+	DIV(DOUT_AUDIO1, "dout_audio1", "mout_audio1", CLK_DIV2, 12, 4),
+	DIV(DOUT_UART, "dout_uart", "mout_uart", CLK_DIV2, 16, 4),
+	DIV(DOUT_IRDA, "dout_irda", "mout_irda", CLK_DIV2, 20, 4),
+};
+
+/* List of clock dividers present on S3C6400. */
+DIV_CLOCKS(s3c6400_div_clks) __initdata = {
+	DIV(ARMCLK, "armclk", "mout_apll", CLK_DIV0, 0, 3),
+};
+
+/* List of clock dividers present on S3C6410. */
+DIV_CLOCKS(s3c6410_div_clks) __initdata = {
+	DIV(ARMCLK, "armclk", "mout_apll", CLK_DIV0, 0, 4),
+	DIV(DOUT_FIMC, "dout_fimc", "hclk", CLK_DIV1, 24, 4),
+	DIV(DOUT_AUDIO2, "dout_audio2", "mout_audio2", CLK_DIV2, 24, 4),
+};
+
+/* List of clock gates present on all S3C64xx SoCs. */
+GATE_CLOCKS(s3c64xx_gate_clks) __initdata = {
+	GATE_BUS(HCLK_UHOST, "hclk_uhost", "hclk", HCLK_GATE, 29),
+	GATE_BUS(HCLK_SECUR, "hclk_secur", "hclk", HCLK_GATE, 28),
+	GATE_BUS(HCLK_SDMA1, "hclk_sdma1", "hclk", HCLK_GATE, 27),
+	GATE_BUS(HCLK_SDMA0, "hclk_sdma0", "hclk", HCLK_GATE, 26),
+	GATE_ON(HCLK_DDR1, "hclk_ddr1", "hclk", HCLK_GATE, 24),
+	GATE_BUS(HCLK_USB, "hclk_usb", "hclk", HCLK_GATE, 20),
+	GATE_BUS(HCLK_HSMMC2, "hclk_hsmmc2", "hclk", HCLK_GATE, 19),
+	GATE_BUS(HCLK_HSMMC1, "hclk_hsmmc1", "hclk", HCLK_GATE, 18),
+	GATE_BUS(HCLK_HSMMC0, "hclk_hsmmc0", "hclk", HCLK_GATE, 17),
+	GATE_BUS(HCLK_MDP, "hclk_mdp", "hclk", HCLK_GATE, 16),
+	GATE_BUS(HCLK_DHOST, "hclk_dhost", "hclk", HCLK_GATE, 15),
+	GATE_BUS(HCLK_IHOST, "hclk_ihost", "hclk", HCLK_GATE, 14),
+	GATE_BUS(HCLK_DMA1, "hclk_dma1", "hclk", HCLK_GATE, 13),
+	GATE_BUS(HCLK_DMA0, "hclk_dma0", "hclk", HCLK_GATE, 12),
+	GATE_BUS(HCLK_JPEG, "hclk_jpeg", "hclk", HCLK_GATE, 11),
+	GATE_BUS(HCLK_CAMIF, "hclk_camif", "hclk", HCLK_GATE, 10),
+	GATE_BUS(HCLK_SCALER, "hclk_scaler", "hclk", HCLK_GATE, 9),
+	GATE_BUS(HCLK_2D, "hclk_2d", "hclk", HCLK_GATE, 8),
+	GATE_BUS(HCLK_TV, "hclk_tv", "hclk", HCLK_GATE, 7),
+	GATE_BUS(HCLK_POST0, "hclk_post0", "hclk", HCLK_GATE, 5),
+	GATE_BUS(HCLK_ROT, "hclk_rot", "hclk", HCLK_GATE, 4),
+	GATE_BUS(HCLK_LCD, "hclk_lcd", "hclk", HCLK_GATE, 3),
+	GATE_BUS(HCLK_TZIC, "hclk_tzic", "hclk", HCLK_GATE, 2),
+	GATE_ON(HCLK_INTC, "hclk_intc", "hclk", HCLK_GATE, 1),
+	GATE_ON(PCLK_SKEY, "pclk_skey", "pclk", PCLK_GATE, 24),
+	GATE_ON(PCLK_CHIPID, "pclk_chipid", "pclk", PCLK_GATE, 23),
+	GATE_BUS(PCLK_SPI1, "pclk_spi1", "pclk", PCLK_GATE, 22),
+	GATE_BUS(PCLK_SPI0, "pclk_spi0", "pclk", PCLK_GATE, 21),
+	GATE_BUS(PCLK_HSIRX, "pclk_hsirx", "pclk", PCLK_GATE, 20),
+	GATE_BUS(PCLK_HSITX, "pclk_hsitx", "pclk", PCLK_GATE, 19),
+	GATE_ON(PCLK_GPIO, "pclk_gpio", "pclk", PCLK_GATE, 18),
+	GATE_BUS(PCLK_IIC0, "pclk_iic0", "pclk", PCLK_GATE, 17),
+	GATE_BUS(PCLK_IIS1, "pclk_iis1", "pclk", PCLK_GATE, 16),
+	GATE_BUS(PCLK_IIS0, "pclk_iis0", "pclk", PCLK_GATE, 15),
+	GATE_BUS(PCLK_AC97, "pclk_ac97", "pclk", PCLK_GATE, 14),
+	GATE_BUS(PCLK_TZPC, "pclk_tzpc", "pclk", PCLK_GATE, 13),
+	GATE_BUS(PCLK_TSADC, "pclk_tsadc", "pclk", PCLK_GATE, 12),
+	GATE_BUS(PCLK_KEYPAD, "pclk_keypad", "pclk", PCLK_GATE, 11),
+	GATE_BUS(PCLK_IRDA, "pclk_irda", "pclk", PCLK_GATE, 10),
+	GATE_BUS(PCLK_PCM1, "pclk_pcm1", "pclk", PCLK_GATE, 9),
+	GATE_BUS(PCLK_PCM0, "pclk_pcm0", "pclk", PCLK_GATE, 8),
+	GATE_BUS(PCLK_PWM, "pclk_pwm", "pclk", PCLK_GATE, 7),
+	GATE_BUS(PCLK_RTC, "pclk_rtc", "pclk", PCLK_GATE, 6),
+	GATE_BUS(PCLK_WDT, "pclk_wdt", "pclk", PCLK_GATE, 5),
+	GATE_BUS(PCLK_UART3, "pclk_uart3", "pclk", PCLK_GATE, 4),
+	GATE_BUS(PCLK_UART2, "pclk_uart2", "pclk", PCLK_GATE, 3),
+	GATE_BUS(PCLK_UART1, "pclk_uart1", "pclk", PCLK_GATE, 2),
+	GATE_BUS(PCLK_UART0, "pclk_uart0", "pclk", PCLK_GATE, 1),
+	GATE_BUS(PCLK_MFC, "pclk_mfc", "pclk", PCLK_GATE, 0),
+	GATE_SCLK(SCLK_UHOST, "sclk_uhost", "dout_uhost", SCLK_GATE, 30),
+	GATE_SCLK(SCLK_MMC2_48, "sclk_mmc2_48", "clk48m", SCLK_GATE, 29),
+	GATE_SCLK(SCLK_MMC1_48, "sclk_mmc1_48", "clk48m", SCLK_GATE, 28),
+	GATE_SCLK(SCLK_MMC0_48, "sclk_mmc0_48", "clk48m", SCLK_GATE, 27),
+	GATE_SCLK(SCLK_MMC2, "sclk_mmc2", "dout_mmc2", SCLK_GATE, 26),
+	GATE_SCLK(SCLK_MMC1, "sclk_mmc1", "dout_mmc1", SCLK_GATE, 25),
+	GATE_SCLK(SCLK_MMC0, "sclk_mmc0", "dout_mmc0", SCLK_GATE, 24),
+	GATE_SCLK(SCLK_SPI1_48, "sclk_spi1_48", "clk48m", SCLK_GATE, 23),
+	GATE_SCLK(SCLK_SPI0_48, "sclk_spi0_48", "clk48m", SCLK_GATE, 22),
+	GATE_SCLK(SCLK_SPI1, "sclk_spi1", "dout_spi1", SCLK_GATE, 21),
+	GATE_SCLK(SCLK_SPI0, "sclk_spi0", "dout_spi0", SCLK_GATE, 20),
+	GATE_SCLK(SCLK_DAC27, "sclk_dac27", "mout_dac27", SCLK_GATE, 19),
+	GATE_SCLK(SCLK_TV27, "sclk_tv27", "mout_tv27", SCLK_GATE, 18),
+	GATE_SCLK(SCLK_SCALER27, "sclk_scaler27", "clk27m", SCLK_GATE, 17),
+	GATE_SCLK(SCLK_SCALER, "sclk_scaler", "dout_scaler", SCLK_GATE, 16),
+	GATE_SCLK(SCLK_LCD27, "sclk_lcd27", "clk27m", SCLK_GATE, 15),
+	GATE_SCLK(SCLK_LCD, "sclk_lcd", "dout_lcd", SCLK_GATE, 14),
+	GATE_SCLK(SCLK_POST0_27, "sclk_post0_27", "clk27m", SCLK_GATE, 12),
+	GATE_SCLK(SCLK_POST0, "sclk_post0", "dout_lcd", SCLK_GATE, 10),
+	GATE_SCLK(SCLK_AUDIO1, "sclk_audio1", "dout_audio1", SCLK_GATE, 9),
+	GATE_SCLK(SCLK_AUDIO0, "sclk_audio0", "dout_audio0", SCLK_GATE, 8),
+	GATE_SCLK(SCLK_SECUR, "sclk_secur", "dout_secur", SCLK_GATE, 7),
+	GATE_SCLK(SCLK_IRDA, "sclk_irda", "dout_irda", SCLK_GATE, 6),
+	GATE_SCLK(SCLK_UART, "sclk_uart", "dout_uart", SCLK_GATE, 5),
+	GATE_SCLK(SCLK_MFC, "sclk_mfc", "dout_mfc", SCLK_GATE, 3),
+	GATE_SCLK(SCLK_CAM, "sclk_cam", "dout_cam", SCLK_GATE, 2),
+	GATE_SCLK(SCLK_JPEG, "sclk_jpeg", "dout_jpeg", SCLK_GATE, 1),
+};
+
+/* List of clock gates present on S3C6400. */
+GATE_CLOCKS(s3c6400_gate_clks) __initdata = {
+	GATE_ON(HCLK_DDR0, "hclk_ddr0", "hclk", HCLK_GATE, 23),
+	GATE_SCLK(SCLK_ONENAND, "sclk_onenand", "parent", SCLK_GATE, 4),
+};
+
+/* List of clock gates present on S3C6410. */
+GATE_CLOCKS(s3c6410_gate_clks) __initdata = {
+	GATE_BUS(HCLK_3DSE, "hclk_3dse", "hclk", HCLK_GATE, 31),
+	GATE_ON(HCLK_IROM, "hclk_irom", "hclk", HCLK_GATE, 25),
+	GATE_ON(HCLK_MEM1, "hclk_mem1", "hclk", HCLK_GATE, 22),
+	GATE_ON(HCLK_MEM0, "hclk_mem0", "hclk", HCLK_GATE, 21),
+	GATE_BUS(HCLK_MFC, "hclk_mfc", "hclk", HCLK_GATE, 0),
+	GATE_BUS(PCLK_IIC1, "pclk_iic1", "pclk", PCLK_GATE, 27),
+	GATE_BUS(PCLK_IIS2, "pclk_iis2", "pclk", PCLK_GATE, 26),
+	GATE_SCLK(SCLK_FIMC, "sclk_fimc", "dout_fimc", SCLK_GATE, 13),
+	GATE_SCLK(SCLK_AUDIO2, "sclk_audio2", "dout_audio2", SCLK_GATE, 11),
+	GATE_BUS(MEM0_CFCON, "mem0_cfcon", "hclk_mem0", MEM0_GATE, 5),
+	GATE_BUS(MEM0_ONENAND1, "mem0_onenand1", "hclk_mem0", MEM0_GATE, 4),
+	GATE_BUS(MEM0_ONENAND0, "mem0_onenand0", "hclk_mem0", MEM0_GATE, 3),
+	GATE_BUS(MEM0_NFCON, "mem0_nfcon", "hclk_mem0", MEM0_GATE, 2),
+	GATE_ON(MEM0_SROM, "mem0_srom", "hclk_mem0", MEM0_GATE, 1),
+};
+
+/* List of PLL clocks. */
+static struct samsung_pll_clock s3c64xx_pll_clks[] __initdata = {
+	[apll] = PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
+						APLL_LOCK, APLL_CON, NULL),
+	[mpll] = PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
+						MPLL_LOCK, MPLL_CON, NULL),
+	[epll] = PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
+						EPLL_LOCK, EPLL_CON0, NULL),
+};
+
+/* Aliases for common s3c64xx clocks. */
+static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
+	ALIAS(FOUT_APLL, NULL, "fout_apll"),
+	ALIAS(FOUT_MPLL, NULL, "fout_mpll"),
+	ALIAS(FOUT_EPLL, NULL, "fout_epll"),
+	ALIAS(MOUT_EPLL, NULL, "mout_epll"),
+	ALIAS(DOUT_MPLL, NULL, "dout_mpll"),
+	ALIAS(HCLKX2, NULL, "hclk2"),
+	ALIAS(HCLK, NULL, "hclk"),
+	ALIAS(PCLK, NULL, "pclk"),
+	ALIAS(PCLK, NULL, "clk_uart_baud2"),
+	ALIAS(ARMCLK, NULL, "armclk"),
+	ALIAS(HCLK_UHOST, "s3c2410-ohci", "usb-host"),
+	ALIAS(HCLK_USB, "s3c-hsotg", "otg"),
+	ALIAS(HCLK_HSMMC2, "s3c-sdhci.2", "hsmmc"),
+	ALIAS(HCLK_HSMMC2, "s3c-sdhci.2", "mmc_busclk.0"),
+	ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"),
+	ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
+	ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
+	ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
+	ALIAS(HCLK_DMA1, NULL, "dma1"),
+	ALIAS(HCLK_DMA0, NULL, "dma0"),
+	ALIAS(HCLK_CAMIF, "s3c-camif", "camif"),
+	ALIAS(HCLK_LCD, "s3c-fb", "lcd"),
+	ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi"),
+	ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi"),
+	ALIAS(PCLK_IIC0, "s3c2440-i2c.0", "i2c"),
+	ALIAS(PCLK_IIS1, "samsung-i2s.1", "iis"),
+	ALIAS(PCLK_IIS0, "samsung-i2s.0", "iis"),
+	ALIAS(PCLK_AC97, "samsung-ac97", "ac97"),
+	ALIAS(PCLK_TSADC, "s3c64xx-adc", "adc"),
+	ALIAS(PCLK_KEYPAD, "samsung-keypad", "keypad"),
+	ALIAS(PCLK_PCM1, "samsung-pcm.1", "pcm"),
+	ALIAS(PCLK_PCM0, "samsung-pcm.0", "pcm"),
+	ALIAS(PCLK_PWM, NULL, "timers"),
+	ALIAS(PCLK_RTC, "s3c64xx-rtc", "rtc"),
+	ALIAS(PCLK_WDT, NULL, "watchdog"),
+	ALIAS(PCLK_UART3, "s3c6400-uart.3", "uart"),
+	ALIAS(PCLK_UART2, "s3c6400-uart.2", "uart"),
+	ALIAS(PCLK_UART1, "s3c6400-uart.1", "uart"),
+	ALIAS(PCLK_UART0, "s3c6400-uart.0", "uart"),
+	ALIAS(SCLK_UHOST, "s3c2410-ohci", "usb-bus-host"),
+	ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
+	ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
+	ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
+	ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
+	ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+	ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
+	ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
+	ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
+	ALIAS(SCLK_AUDIO0, "samsung-i2s.0", "audio-bus"),
+	ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
+	ALIAS(SCLK_CAM, "s3c-camif", "camera"),
+};
+
+/* Aliases for s3c6400-specific clocks. */
+static struct samsung_clock_alias s3c6400_clock_aliases[] = {
+	/* Nothing to place here yet. */
+};
+
+/* Aliases for s3c6410-specific clocks. */
+static struct samsung_clock_alias s3c6410_clock_aliases[] = {
+	ALIAS(PCLK_IIC1, "s3c2440-i2c.1", "i2c"),
+	ALIAS(PCLK_IIS2, "samsung-i2s.2", "iis"),
+	ALIAS(SCLK_FIMC, "s3c-camif", "fimc"),
+	ALIAS(SCLK_AUDIO2, "samsung-i2s.2", "audio-bus"),
+	ALIAS(MEM0_SROM, NULL, "srom"),
+};
+
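The ALIAS() tables above keep legacy (non-DT) consumers working: a driver
keeps calling clk_get() with its device and connection name, and clkdev
resolves that to the clock registered here. A minimal consumer sketch
(the device pointer and error handling are hypothetical; the "iis" id
comes from the s3c6410 alias table above):

	/* Hypothetical driver probe: looks up PCLK_IIS2 via its alias. */
	struct clk *iis = clk_get(&pdev->dev, "iis");
	if (IS_ERR(iis))
		return PTR_ERR(iis);
	clk_prepare_enable(iis);
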
+static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f,
+							unsigned long xusbxti_f)
+{
+	s3c64xx_fixed_rate_ext_clks[0].fixed_rate = fin_pll_f;
+	s3c64xx_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
+	samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_ext_clks,
+				ARRAY_SIZE(s3c64xx_fixed_rate_ext_clks));
+}
+
+/* Register s3c64xx clocks. */
+void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+			     unsigned long xusbxti_f, bool is_s3c6400,
+			     void __iomem *reg_base)
+{
+	unsigned long *soc_regs = NULL;
+	unsigned long nr_soc_regs = 0;
+
+	if (np) {
+		reg_base = of_iomap(np, 0);
+		if (!reg_base)
+			panic("%s: failed to map registers\n", __func__);
+	}
+
+	if (!is_s3c6400) {
+		soc_regs = s3c6410_clk_regs;
+		nr_soc_regs = ARRAY_SIZE(s3c6410_clk_regs);
+	}
+
+	samsung_clk_init(np, reg_base, NR_CLKS, s3c64xx_clk_regs,
+			ARRAY_SIZE(s3c64xx_clk_regs), soc_regs, nr_soc_regs);
+
+	/* Register external clocks. */
+	if (!np)
+		s3c64xx_clk_register_fixed_ext(xtal_f, xusbxti_f);
+
+	/* Register PLLs. */
+	samsung_clk_register_pll(s3c64xx_pll_clks,
+				ARRAY_SIZE(s3c64xx_pll_clks), reg_base);
+
+	/* Register common internal clocks. */
+	samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_clks,
+					ARRAY_SIZE(s3c64xx_fixed_rate_clks));
+	samsung_clk_register_mux(s3c64xx_mux_clks,
+					ARRAY_SIZE(s3c64xx_mux_clks));
+	samsung_clk_register_div(s3c64xx_div_clks,
+					ARRAY_SIZE(s3c64xx_div_clks));
+	samsung_clk_register_gate(s3c64xx_gate_clks,
+					ARRAY_SIZE(s3c64xx_gate_clks));
+
+	/* Register SoC-specific clocks. */
+	if (is_s3c6400) {
+		samsung_clk_register_mux(s3c6400_mux_clks,
+					ARRAY_SIZE(s3c6400_mux_clks));
+		samsung_clk_register_div(s3c6400_div_clks,
+					ARRAY_SIZE(s3c6400_div_clks));
+		samsung_clk_register_gate(s3c6400_gate_clks,
+					ARRAY_SIZE(s3c6400_gate_clks));
+		samsung_clk_register_alias(s3c6400_clock_aliases,
+					ARRAY_SIZE(s3c6400_clock_aliases));
+	} else {
+		samsung_clk_register_mux(s3c6410_mux_clks,
+					ARRAY_SIZE(s3c6410_mux_clks));
+		samsung_clk_register_div(s3c6410_div_clks,
+					ARRAY_SIZE(s3c6410_div_clks));
+		samsung_clk_register_gate(s3c6410_gate_clks,
+					ARRAY_SIZE(s3c6410_gate_clks));
+		samsung_clk_register_alias(s3c6410_clock_aliases,
+					ARRAY_SIZE(s3c6410_clock_aliases));
+	}
+
+	samsung_clk_register_alias(s3c64xx_clock_aliases,
+					ARRAY_SIZE(s3c64xx_clock_aliases));
+
+	pr_info("%s clocks: apll = %lu, mpll = %lu\n"
+		"\tepll = %lu, arm_clk = %lu\n",
+		is_s3c6400 ? "S3C6400" : "S3C6410",
+		_get_rate("fout_apll"),	_get_rate("fout_mpll"),
+		_get_rate("fout_epll"), _get_rate("armclk"));
+}
+
+static void __init s3c6400_clk_init(struct device_node *np)
+{
+	s3c64xx_clk_init(np, 0, 0, true, NULL);
+}
+CLK_OF_DECLARE(s3c6400_clk, "samsung,s3c6400-clock", s3c6400_clk_init);
+
+static void __init s3c6410_clk_init(struct device_node *np)
+{
+	s3c64xx_clk_init(np, 0, 0, false, NULL);
+}
+CLK_OF_DECLARE(s3c6410_clk, "samsung,s3c6410-clock", s3c6410_clk_init);
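For boards that do not boot via device tree, the expectation is that
machine code calls the init function directly: the np == NULL path skips
of_iomap() and registers the external fixed-rate clocks from the rates
passed in. A sketch of such a call (the 12 MHz / 48 MHz rates and the
register mapping are assumptions for illustration, not from this patch):

	/* Hypothetical S3C6410 board setup. */
	void __iomem *reg_base = ioremap(0x7e00f000, SZ_4K);

	s3c64xx_clk_init(NULL, 12000000, 48000000, false, reg_base);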
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index cd3c40a..f503f32 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -307,14 +307,12 @@
 unsigned long _get_rate(const char *clk_name)
 {
 	struct clk *clk;
-	unsigned long rate;
 
-	clk = clk_get(NULL, clk_name);
-	if (IS_ERR(clk)) {
+	clk = __clk_lookup(clk_name);
+	if (!clk) {
 		pr_err("%s: could not find clock %s\n", __func__, clk_name);
 		return 0;
 	}
-	rate = clk_get_rate(clk);
-	clk_put(clk);
-	return rate;
+
+	return clk_get_rate(clk);
 }
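The clk_get()/clk_put() pair was the wrong tool here: clk_get(NULL, name)
only succeeds when a clkdev alias with that connection id exists, while
__clk_lookup() searches the registered clocks by their own names, needs
no matching clk_put(), and returns NULL rather than an ERR_PTR() on
failure, which is why the error test changes as well. The helper is then
usable from provider init code, as in the s3c64xx pr_info() above:

	pr_info("armclk = %lu\n", _get_rate("armclk"));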
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 2f7dba2..31b4174 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -19,6 +19,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include "clk-pll.h"
 
 /**
 * struct samsung_clock_alias: information about a clock alias
@@ -39,6 +40,8 @@
 		.alias		= a,				\
 	}
 
+#define MHZ (1000 * 1000)
+
 /**
  * struct samsung_fixed_rate_clock: information about fixed-rate clock
  * @id: platform specific id of the clock.
@@ -127,7 +130,7 @@
 		.name		= cname,			\
 		.parent_names	= pnames,			\
 		.num_parents	= ARRAY_SIZE(pnames),		\
-		.flags		= f,				\
+		.flags		= (f) | CLK_SET_RATE_NO_REPARENT, \
 		.offset		= o,				\
 		.shift		= s,				\
 		.width		= w,				\
@@ -261,6 +264,54 @@
 	u32	value;
 };
 
+/**
+ * struct samsung_pll_clock: information about pll clock
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @name: name of this pll clock.
+ * @parent_name: name of the parent clock.
+ * @flags: optional flags for basic clock.
+ * @con_offset: offset of the register for configuring the PLL.
+ * @lock_offset: offset of the register for locking the PLL.
+ * @type: type of PLL to be registered.
+ * @rate_table: optional table of supported rates with their PLL settings.
+ * @alias: optional clock alias name to be assigned to this clock.
+ */
+struct samsung_pll_clock {
+	unsigned int		id;
+	const char		*dev_name;
+	const char		*name;
+	const char		*parent_name;
+	unsigned long		flags;
+	int			con_offset;
+	int			lock_offset;
+	enum samsung_pll_type	type;
+	const struct samsung_pll_rate_table *rate_table;
+	const char              *alias;
+};
+
+#define __PLL(_typ, _id, _dname, _name, _pname, _flags, _lock, _con,	\
+		_rtable, _alias)					\
+	{								\
+		.id		= _id,					\
+		.type		= _typ,					\
+		.dev_name	= _dname,				\
+		.name		= _name,				\
+		.parent_name	= _pname,				\
+		.flags		= _flags,				\
+		.con_offset	= _con,					\
+		.lock_offset	= _lock,				\
+		.rate_table	= _rtable,				\
+		.alias		= _alias,				\
+	}
+
+#define PLL(_typ, _id, _name, _pname, _lock, _con, _rtable)	\
+	__PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE,	\
+		_lock, _con, _rtable, _name)
+
+#define PLL_A(_typ, _id, _name, _pname, _lock, _con, _alias, _rtable) \
+	__PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE,	\
+		_lock, _con, _rtable, _alias)
+
 extern void __init samsung_clk_init(struct device_node *np, void __iomem *base,
 		unsigned long nr_clks, unsigned long *rdump,
 		unsigned long nr_rdump, unsigned long *soc_rdump,
@@ -284,6 +335,8 @@
 		unsigned int nr_clk);
 extern void __init samsung_clk_register_gate(
 		struct samsung_gate_clock *clk_list, unsigned int nr_clk);
+extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
+		unsigned int nr_clk, void __iomem *base);
 
 extern unsigned long _get_rate(const char *clk_name);
 
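Taken together, these additions let a SoC driver describe its PLLs as an
__initdata table and register them in one call; the clk-s3c64xx.c hunk
above is the first user. A reduced sketch of the pattern (our reading:
a NULL rate table leaves the PLL at its boot rate, and passing a
samsung_pll_rate_table is what enables rate setting):

	static struct samsung_pll_clock pll_clks[] __initdata = {
		PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
					APLL_LOCK, APLL_CON, NULL),
	};

	samsung_clk_register_pll(pll_clks, ARRAY_SIZE(pll_clks), reg_base);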
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index aedbbe1..65894f7 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -416,9 +416,9 @@
 	/* clock derived from 24 or 25 MHz osc clk */
 	/* vco-pll */
 	clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
-			ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
-			SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PLL_CFG, SPEAR1310_PLL1_CLK_SHIFT,
+			SPEAR1310_PLL_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "vco1_mclk", NULL);
 	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
 			0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
@@ -427,9 +427,9 @@
 	clk_register_clkdev(clk1, "pll1_clk", NULL);
 
 	clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
-			ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
-			SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PLL_CFG, SPEAR1310_PLL2_CLK_SHIFT,
+			SPEAR1310_PLL_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "vco2_mclk", NULL);
 	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
 			0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
@@ -438,9 +438,9 @@
 	clk_register_clkdev(clk1, "pll2_clk", NULL);
 
 	clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
-			ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
-			SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PLL_CFG, SPEAR1310_PLL3_CLK_SHIFT,
+			SPEAR1310_PLL_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "vco3_mclk", NULL);
 	clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
 			0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
@@ -515,9 +515,9 @@
 
 	/* gpt clocks */
 	clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
-			SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT0_CLK_SHIFT,
+			SPEAR1310_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt0_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
 			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
@@ -525,9 +525,9 @@
 	clk_register_clkdev(clk, NULL, "gpt0");
 
 	clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
-			SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT1_CLK_SHIFT,
+			SPEAR1310_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt1_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
 			SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
@@ -535,9 +535,9 @@
 	clk_register_clkdev(clk, NULL, "gpt1");
 
 	clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
-			SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT2_CLK_SHIFT,
+			SPEAR1310_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt2_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
 			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
@@ -545,9 +545,9 @@
 	clk_register_clkdev(clk, NULL, "gpt2");
 
 	clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
-			SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT3_CLK_SHIFT,
+			SPEAR1310_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt3_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
 			SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
@@ -562,7 +562,8 @@
 	clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
-			ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uart0_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_UART_CLK_SHIFT,
 			SPEAR1310_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -602,7 +603,8 @@
 	clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
-			ARRAY_SIZE(c3_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(c3_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_C3_CLK_SHIFT,
 			SPEAR1310_C3_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "c3_mclk", NULL);
@@ -614,8 +616,8 @@
 
 	/* gmac */
 	clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
-			ARRAY_SIZE(gmac_phy_input_parents), 0,
-			SPEAR1310_GMAC_CLK_CFG,
+			ARRAY_SIZE(gmac_phy_input_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1310_GMAC_CLK_CFG,
 			SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
 			SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "phy_input_mclk", NULL);
@@ -627,15 +629,16 @@
 	clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
-			ARRAY_SIZE(gmac_phy_parents), 0,
+			ARRAY_SIZE(gmac_phy_parents), CLK_SET_RATE_NO_REPARENT,
 			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
 			SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "stmmacphy.0", NULL);
 
 	/* clcd */
 	clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
-			ARRAY_SIZE(clcd_synth_parents), 0,
-			SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+			ARRAY_SIZE(clcd_synth_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1310_CLCD_CLK_SYNT,
+			SPEAR1310_CLCD_SYNT_CLK_SHIFT,
 			SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
@@ -645,7 +648,8 @@
 	clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
 	clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
-			ARRAY_SIZE(clcd_pixel_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(clcd_pixel_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
 			SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "clcd_pixel_mclk", NULL);
@@ -657,9 +661,9 @@
 
 	/* i2s */
 	clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
-			ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
-			SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(i2s_src_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_SRC_CLK_SHIFT,
+			SPEAR1310_I2S_SRC_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2s_src_mclk", NULL);
 
 	clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
@@ -668,7 +672,8 @@
 	clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
 	clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
-			ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(i2s_ref_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_REF_SHIFT,
 			SPEAR1310_I2S_REF_SEL_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2s_ref_mclk", NULL);
@@ -806,13 +811,15 @@
 
 	/* RAS clks */
 	clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
-			ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+			ARRAY_SIZE(gen_synth0_1_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1310_PLL_CFG,
 			SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
 			SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
 	clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
-			ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+			ARRAY_SIZE(gen_synth2_3_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1310_PLL_CFG,
 			SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
 			SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
@@ -929,8 +936,8 @@
 
 	clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
 			smii_rgmii_phy_parents,
-			ARRAY_SIZE(smii_rgmii_phy_parents), 0,
-			SPEAR1310_RAS_CTRL_REG1,
+			ARRAY_SIZE(smii_rgmii_phy_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1310_RAS_CTRL_REG1,
 			SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
 			SPEAR1310_PHY_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "stmmacphy.1", NULL);
@@ -938,15 +945,15 @@
 	clk_register_clkdev(clk, "stmmacphy.4", NULL);
 
 	clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
-			ARRAY_SIZE(rmii_phy_parents), 0,
+			ARRAY_SIZE(rmii_phy_parents), CLK_SET_RATE_NO_REPARENT,
 			SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
 			SPEAR1310_PHY_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "stmmacphy.3", NULL);
 
 	clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
-			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART1_CLK_SHIFT,
+			SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart1_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
@@ -955,9 +962,9 @@
 	clk_register_clkdev(clk, NULL, "5c800000.serial");
 
 	clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
-			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART2_CLK_SHIFT,
+			SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart2_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
@@ -966,9 +973,9 @@
 	clk_register_clkdev(clk, NULL, "5c900000.serial");
 
 	clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
-			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART3_CLK_SHIFT,
+			SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart3_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
@@ -977,9 +984,9 @@
 	clk_register_clkdev(clk, NULL, "5ca00000.serial");
 
 	clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
-			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART4_CLK_SHIFT,
+			SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart4_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
@@ -988,9 +995,9 @@
 	clk_register_clkdev(clk, NULL, "5cb00000.serial");
 
 	clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
-			ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART5_CLK_SHIFT,
+			SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart5_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
@@ -999,9 +1006,9 @@
 	clk_register_clkdev(clk, NULL, "5cc00000.serial");
 
 	clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C1_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c1_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
@@ -1010,9 +1017,9 @@
 	clk_register_clkdev(clk, NULL, "5cd00000.i2c");
 
 	clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C2_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c2_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
@@ -1021,9 +1028,9 @@
 	clk_register_clkdev(clk, NULL, "5ce00000.i2c");
 
 	clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C3_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c3_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
@@ -1032,9 +1039,9 @@
 	clk_register_clkdev(clk, NULL, "5cf00000.i2c");
 
 	clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C4_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c4_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
@@ -1043,9 +1050,9 @@
 	clk_register_clkdev(clk, NULL, "5d000000.i2c");
 
 	clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C5_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c5_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
@@ -1054,9 +1061,9 @@
 	clk_register_clkdev(clk, NULL, "5d100000.i2c");
 
 	clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C6_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c6_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
@@ -1065,9 +1072,9 @@
 	clk_register_clkdev(clk, NULL, "5d200000.i2c");
 
 	clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
-			ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C7_CLK_SHIFT,
+			SPEAR1310_I2C_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2c7_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
@@ -1076,9 +1083,9 @@
 	clk_register_clkdev(clk, NULL, "5d300000.i2c");
 
 	clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
-			ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(ssp1_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_SSP1_CLK_SHIFT,
+			SPEAR1310_SSP1_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "ssp1_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
@@ -1087,9 +1094,9 @@
 	clk_register_clkdev(clk, NULL, "5d400000.spi");
 
 	clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
-			ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(pci_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_PCI_CLK_SHIFT,
+			SPEAR1310_PCI_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "pci_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
@@ -1098,9 +1105,9 @@
 	clk_register_clkdev(clk, NULL, "pci");
 
 	clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
-			ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(tdm_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM1_CLK_SHIFT,
+			SPEAR1310_TDM_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "tdm1_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
@@ -1109,9 +1116,9 @@
 	clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
 
 	clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
-			ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
-			SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(tdm_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM2_CLK_SHIFT,
+			SPEAR1310_TDM_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "tdm2_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
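This file and the three SPEAr files below receive the same mechanical
conversion: now that muxes can implement determine_rate(), a mux may
switch parents during clk_set_rate() unless it carries
CLK_SET_RATE_NO_REPARENT, so every mux that relied on the old
fixed-parent behaviour sets the flag explicitly (OR-ing it with
CLK_SET_RATE_PARENT where that was already present). The generic shape
of each hunk, with hypothetical names:

	/* before: flags = 0 meant "never reparent" implicitly */
	clk = clk_register_mux(NULL, "foo_mclk", foo_parents,
			ARRAY_SIZE(foo_parents), 0,
			FOO_REG, FOO_SHIFT, FOO_MASK, 0, &_lock);

	/* after: the same guarantee, stated explicitly */
	clk = clk_register_mux(NULL, "foo_mclk", foo_parents,
			ARRAY_SIZE(foo_parents), CLK_SET_RATE_NO_REPARENT,
			FOO_REG, FOO_SHIFT, FOO_MASK, 0, &_lock);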
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9d0b394..fe835c1 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -473,9 +473,9 @@
 	/* clock derived from 24 or 25 MHz osc clk */
 	/* vco-pll */
 	clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
-			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
-			SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PLL_CFG, SPEAR1340_PLL1_CLK_SHIFT,
+			SPEAR1340_PLL_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "vco1_mclk", NULL);
 	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
 			SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
@@ -484,9 +484,9 @@
 	clk_register_clkdev(clk1, "pll1_clk", NULL);
 
 	clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
-			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
-			SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PLL_CFG, SPEAR1340_PLL2_CLK_SHIFT,
+			SPEAR1340_PLL_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "vco2_mclk", NULL);
 	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
 			SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
@@ -495,9 +495,9 @@
 	clk_register_clkdev(clk1, "pll2_clk", NULL);
 
 	clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
-			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
-			SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PLL_CFG, SPEAR1340_PLL3_CLK_SHIFT,
+			SPEAR1340_PLL_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "vco3_mclk", NULL);
 	clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
 			SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
@@ -561,8 +561,8 @@
 	clk_register_clkdev(clk, "amba_syn_clk", NULL);
 
 	clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
-			ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
-			SPEAR1340_SCLK_SRC_SEL_SHIFT,
+			ARRAY_SIZE(sys_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_SYS_CLK_CTRL, SPEAR1340_SCLK_SRC_SEL_SHIFT,
 			SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "sys_mclk", NULL);
 
@@ -583,8 +583,8 @@
 	clk_register_clkdev(clk, NULL, "smp_twd");
 
 	clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
-			ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
-			SPEAR1340_HCLK_SRC_SEL_SHIFT,
+			ARRAY_SIZE(ahb_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_SYS_CLK_CTRL, SPEAR1340_HCLK_SRC_SEL_SHIFT,
 			SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "ahb_clk", NULL);
 
@@ -594,9 +594,9 @@
 
 	/* gpt clocks */
 	clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
-			SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT0_CLK_SHIFT,
+			SPEAR1340_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt0_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
 			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
@@ -604,9 +604,9 @@
 	clk_register_clkdev(clk, NULL, "gpt0");
 
 	clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
-			SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT1_CLK_SHIFT,
+			SPEAR1340_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt1_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
 			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
@@ -614,9 +614,9 @@
 	clk_register_clkdev(clk, NULL, "gpt1");
 
 	clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
-			SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT2_CLK_SHIFT,
+			SPEAR1340_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt2_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
 			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
@@ -624,9 +624,9 @@
 	clk_register_clkdev(clk, NULL, "gpt2");
 
 	clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
-			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
-			SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT3_CLK_SHIFT,
+			SPEAR1340_GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt3_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
 			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
@@ -641,7 +641,8 @@
 	clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
-			ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uart0_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART0_CLK_SHIFT,
 			SPEAR1340_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -658,9 +659,9 @@
 	clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
-			ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
-			SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(uart1_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART1_CLK_SHIFT,
+			SPEAR1340_UART_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "uart1_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
@@ -698,7 +699,8 @@
 	clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
-			ARRAY_SIZE(c3_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(c3_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_C3_CLK_SHIFT,
 			SPEAR1340_C3_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "c3_mclk", NULL);
@@ -710,8 +712,8 @@
 
 	/* gmac */
 	clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
-			ARRAY_SIZE(gmac_phy_input_parents), 0,
-			SPEAR1340_GMAC_CLK_CFG,
+			ARRAY_SIZE(gmac_phy_input_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1340_GMAC_CLK_CFG,
 			SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
 			SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "phy_input_mclk", NULL);
@@ -723,15 +725,16 @@
 	clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
-			ARRAY_SIZE(gmac_phy_parents), 0,
+			ARRAY_SIZE(gmac_phy_parents), CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
 			SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "stmmacphy.0", NULL);
 
 	/* clcd */
 	clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
-			ARRAY_SIZE(clcd_synth_parents), 0,
-			SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+			ARRAY_SIZE(clcd_synth_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1340_CLCD_CLK_SYNT,
+			SPEAR1340_CLCD_SYNT_CLK_SHIFT,
 			SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
@@ -741,7 +744,8 @@
 	clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
 	clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
-			ARRAY_SIZE(clcd_pixel_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(clcd_pixel_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
 			SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "clcd_pixel_mclk", NULL);
@@ -753,9 +757,9 @@
 
 	/* i2s */
 	clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
-			ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
-			SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
-			0, &_lock);
+			ARRAY_SIZE(i2s_src_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_SRC_CLK_SHIFT,
+			SPEAR1340_I2S_SRC_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2s_src_mclk", NULL);
 
 	clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk",
@@ -765,7 +769,8 @@
 	clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
 	clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
-			ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(i2s_ref_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_REF_SHIFT,
 			SPEAR1340_I2S_REF_SEL_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2s_ref_mclk", NULL);
@@ -891,13 +896,15 @@
 
 	/* RAS clks */
 	clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
-			ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+			ARRAY_SIZE(gen_synth0_1_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1340_PLL_CFG,
 			SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
 			SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gen_syn0_1_mclk", NULL);
 
 	clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
-			ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+			ARRAY_SIZE(gen_synth2_3_parents),
+			CLK_SET_RATE_NO_REPARENT, SPEAR1340_PLL_CFG,
 			SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
 			SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gen_syn2_3_mclk", NULL);
@@ -938,7 +945,8 @@
 	clk_register_clkdev(clk, NULL, "spear_cec.1");
 
 	clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
-			ARRAY_SIZE(spdif_out_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(spdif_out_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
 			SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "spdif_out_mclk", NULL);
@@ -949,7 +957,8 @@
 	clk_register_clkdev(clk, NULL, "d0000000.spdif-out");
 
 	clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
-			ARRAY_SIZE(spdif_in_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(spdif_in_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
 			SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "spdif_in_mclk", NULL);
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 080c3c5..c2d2043 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -294,7 +294,8 @@
 	clk_register_clkdev(clk, NULL, "a9400000.i2s");
 
 	clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
-			ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(i2s_ref_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_CONTROL_REG, I2S_REF_PCLK_SHIFT,
 			I2S_REF_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "i2s_ref_clk", NULL);
@@ -313,57 +314,66 @@
 	clk_register_clkdev(clk, "hclk", "ab000000.eth");
 
 	clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_EXT_CTRL_REG, SPEAR320_RS485_PCLK_SHIFT,
 			SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "a9300000.serial");
 
 	clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
-			ARRAY_SIZE(sdhci_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(sdhci_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_CONTROL_REG, SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK,
 			0, &_lock);
 	clk_register_clkdev(clk, NULL, "70000000.sdhci");
 
 	clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
-			ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
-			SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+			ARRAY_SIZE(smii0_parents), CLK_SET_RATE_NO_REPARENT,
+			SPEAR320_CONTROL_REG, SMII_PCLK_SHIFT, SMII_PCLK_MASK,
+			0, &_lock);
 	clk_register_clkdev(clk, NULL, "smii_pclk");
 
 	clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
 	clk_register_clkdev(clk, NULL, "smii");
 
 	clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
 			0, &_lock);
 	clk_register_clkdev(clk, NULL, "a3000000.serial");
 
 	clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
 			SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "a4000000.serial");
 
 	clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_EXT_CTRL_REG, SPEAR320_UART3_PCLK_SHIFT,
 			SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "a9100000.serial");
 
 	clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_EXT_CTRL_REG, SPEAR320_UART4_PCLK_SHIFT,
 			SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "a9200000.serial");
 
 	clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_EXT_CTRL_REG, SPEAR320_UART5_PCLK_SHIFT,
 			SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "60000000.serial");
 
 	clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
-			ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uartx_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			SPEAR320_EXT_CTRL_REG, SPEAR320_UART6_PCLK_SHIFT,
 			SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "60100000.serial");
@@ -427,7 +437,8 @@
 	clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
-			ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(uart0_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0,
 			&_lock);
 	clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -444,7 +455,8 @@
 	clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
-			ARRAY_SIZE(firda_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(firda_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0,
 			&_lock);
 	clk_register_clkdev(clk, "firda_mclk", NULL);
@@ -458,14 +470,16 @@
 	clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
 			ARRAY_SIZE(gpt_rtbl), &_lock);
 	clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
-			ARRAY_SIZE(gpt0_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(gpt0_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "gpt0");
 
 	clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
 			ARRAY_SIZE(gpt_rtbl), &_lock);
 	clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
-			ARRAY_SIZE(gpt1_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(gpt1_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt1_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk",
@@ -476,7 +490,8 @@
 	clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
 			ARRAY_SIZE(gpt_rtbl), &_lock);
 	clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
-			ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_PARENT,
+			ARRAY_SIZE(gpt2_parents),
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
 			PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt2_mclk", NULL);
 	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk",
@@ -498,9 +513,9 @@
 	clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
-			ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
-			GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
-			&_lock);
+			ARRAY_SIZE(gen2_3_parents), CLK_SET_RATE_NO_REPARENT,
+			CORE_CLK_CFG, GEN_SYNTH2_3_CLK_SHIFT,
+			GEN_SYNTH2_3_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
 
 	clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
@@ -540,8 +555,8 @@
 	clk_register_clkdev(clk, "ahbmult2_clk", NULL);
 
 	clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
-			ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
-			MCTR_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(ddr_parents), CLK_SET_RATE_NO_REPARENT,
+			PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "ddr_clk", NULL);
 
 	clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 9406f24..4f649c9 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -169,8 +169,9 @@
 	clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
-			ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
-			UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0,
+			&_lock);
 	clk_register_clkdev(clk, "uart_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
@@ -188,8 +189,9 @@
 	clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
-			ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
-			FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(firda_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0,
+			&_lock);
 	clk_register_clkdev(clk, "firda_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
@@ -203,8 +205,9 @@
 	clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
 
 	clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
-			ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
-			CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(clcd_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0,
+			&_lock);
 	clk_register_clkdev(clk, "clcd_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
@@ -217,13 +220,13 @@
 	clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
 
 	clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
-			ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
-			GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(gpt0_1_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, NULL, "gpt0");
 
 	clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
-			ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
-			GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(gpt0_1_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt1_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
@@ -235,8 +238,8 @@
 	clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
 
 	clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
-			ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
-			GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt2_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
@@ -248,8 +251,8 @@
 	clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
 
 	clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
-			ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
-			GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(gpt3_parents), CLK_SET_RATE_NO_REPARENT,
+			PERIP_CLK_CFG, GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "gpt3_mclk", NULL);
 
 	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
@@ -277,8 +280,8 @@
 	clk_register_clkdev(clk, "ahbmult2_clk", NULL);
 
 	clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
-			ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
-			MCTR_CLK_MASK, 0, &_lock);
+			ARRAY_SIZE(ddr_parents), CLK_SET_RATE_NO_REPARENT,
+			PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock);
 	clk_register_clkdev(clk, "ddr_clk", NULL);
 
 	clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 412912b..34ee69f 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -25,12 +25,12 @@
 static DEFINE_SPINLOCK(clk_lock);
 
 /**
- * sunxi_osc_clk_setup() - Setup function for gatable oscillator
+ * sun4i_osc_clk_setup() - Setup function for gatable oscillator
  */
 
 #define SUNXI_OSC24M_GATE	0
 
-static void __init sunxi_osc_clk_setup(struct device_node *node)
+static void __init sun4i_osc_clk_setup(struct device_node *node)
 {
 	struct clk *clk;
 	struct clk_fixed_rate *fixed;
@@ -64,22 +64,23 @@
 			&gate->hw, &clk_gate_ops,
 			CLK_IS_ROOT);
 
-	if (clk) {
+	if (!IS_ERR(clk)) {
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 		clk_register_clkdev(clk, clk_name, NULL);
 	}
 }
+CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup);
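Note the error-check fix folded into the rename above: clk registration
functions return an ERR_PTR() value, never NULL, on failure, so the old
"if (clk)" test would accept an error pointer. The correct pattern,
sketched with a hypothetical fixed-rate clock:

	struct clk *clk = clk_register_fixed_rate(NULL, "example", NULL,
						  CLK_IS_ROOT, 24000000);
	if (IS_ERR(clk))
		pr_err("failed to register example clock\n");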
 
 
 
 /**
- * sunxi_get_pll1_factors() - calculates n, k, m, p factors for PLL1
+ * sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
  * PLL1 rate is calculated as follows
  * rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
  * parent_rate is always 24MHz
  */
 
-static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
+static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
 				   u8 *n, u8 *k, u8 *m, u8 *p)
 {
 	u8 div;
@@ -124,15 +125,97 @@
 	*n = div / 4;
 }
 
+/**
+ * sun6i_a31_get_pll1_factors() - calculates n, k and m factors for PLL1
+ * PLL1 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
+ * parent_rate should always be 24MHz
+ */
+static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
+				       u8 *n, u8 *k, u8 *m, u8 *p)
+{
+	/*
+	 * We can operate only on MHz; this will make our life easier
+	 * later.
+	 */
+	u32 freq_mhz = *freq / 1000000;
+	u32 parent_freq_mhz = parent_rate / 1000000;
 
+	/*
+	 * Round down the frequency to the closest multiple of either
+	 * 6 or 16
+	 */
+	u32 round_freq_6 = round_down(freq_mhz, 6);
+	u32 round_freq_16 = round_down(freq_mhz, 16);
+
+	if (round_freq_6 > round_freq_16)
+		freq_mhz = round_freq_6;
+	else
+		freq_mhz = round_freq_16;
+
+	*freq = freq_mhz * 1000000;
+
+	/*
+	 * If the factors pointer is NULL, we were just called to
+	 * round down the frequency, so exit early.
+	 */
+	if (n == NULL)
+		return;
+
+	/* If the frequency is a multiple of 32 MHz, k is always 3 */
+	if (!(freq_mhz % 32))
+		*k = 3;
+	/* If the frequency is a multiple of 9 MHz, k is always 2 */
+	else if (!(freq_mhz % 9))
+		*k = 2;
+	/* If the frequency is a multiple of 8 MHz, k is always 1 */
+	else if (!(freq_mhz % 8))
+		*k = 1;
+	/* Otherwise, we don't use the k factor */
+	else
+		*k = 0;
+
+	/*
+	 * If the frequency is a multiple of 2 but not a multiple of
+	 * 3, m is set to 2 (a divisor of 3). This is the first time
+	 * we use 6 here, yet we will use it in several other places.
+	 * We use this number because 6 MHz is the lowest frequency we
+	 * can generate (with n = 0, k = 0, m = 3), so every other
+	 * frequency somehow relates to it.
+	 */
+	if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
+		*m = 2;
+	/*
+	 * If the frequency is a multiple of 6MHz, but the factor is
+	 * odd, m will be 3
+	 */
+	else if ((freq_mhz / 6) & 1)
+		*m = 3;
+	/* Otherwise, we end up with m = 1 */
+	else
+		*m = 1;
+
+	/* Calculate n from the factors computed above */
+	*n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+
+	/*
+	 * If n ends up out of range and m can still be decreased,
+	 * halve both (n + 1) and (m + 1).
+	 */
+	if ((*n + 1) > 31 && (*m + 1) > 1) {
+		*n = (*n + 1) / 2 - 1;
+		*m = (*m + 1) / 2 - 1;
+	}
+}
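
A worked example, not part of the patch, may help follow the factor selection above: requesting 1008 MHz from the 24 MHz parent. 1008 is already a multiple of 6, so the rounding step changes nothing; 1008 % 32 != 0 but 1008 % 9 == 0, so k = 2; 1008 is a multiple of 6 and 1008 / 6 = 168 is even, so m = 1; finally n = 1008 * (1 + 1) / ((2 + 1) * 24) - 1 = 27. Cross-checking against the rate formula: 24 MHz * (27 + 1) * (2 + 1) / (1 + 1) = 1008 MHz.
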
 
 /**
- * sunxi_get_apb1_factors() - calculates m, p factors for APB1
+ * sun4i_get_apb1_factors() - calculates m, p factors for APB1
  * APB1 rate is calculated as follows
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sunxi_get_apb1_factors(u32 *freq, u32 parent_rate,
+static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
 				   u8 *n, u8 *k, u8 *m, u8 *p)
 {
 	u8 calcm, calcp;
@@ -178,7 +261,7 @@
 	void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
 };
 
-static struct clk_factors_config pll1_config = {
+static struct clk_factors_config sun4i_pll1_config = {
 	.nshift = 8,
 	.nwidth = 5,
 	.kshift = 4,
@@ -189,21 +272,35 @@
 	.pwidth = 2,
 };
 
-static struct clk_factors_config apb1_config = {
+static struct clk_factors_config sun6i_a31_pll1_config = {
+	.nshift	= 8,
+	.nwidth = 5,
+	.kshift = 4,
+	.kwidth = 2,
+	.mshift = 0,
+	.mwidth = 2,
+};
+
+static struct clk_factors_config sun4i_apb1_config = {
 	.mshift = 0,
 	.mwidth = 5,
 	.pshift = 16,
 	.pwidth = 2,
 };
 
-static const __initconst struct factors_data pll1_data = {
-	.table = &pll1_config,
-	.getter = sunxi_get_pll1_factors,
+static const struct factors_data sun4i_pll1_data __initconst = {
+	.table = &sun4i_pll1_config,
+	.getter = sun4i_get_pll1_factors,
 };
 
-static const __initconst struct factors_data apb1_data = {
-	.table = &apb1_config,
-	.getter = sunxi_get_apb1_factors,
+static const struct factors_data sun6i_a31_pll1_data __initconst = {
+	.table = &sun6i_a31_pll1_config,
+	.getter = sun6i_a31_get_pll1_factors,
+};
+
+static const struct factors_data sun4i_apb1_data __initconst = {
+	.table = &sun4i_apb1_config,
+	.getter = sun4i_get_apb1_factors,
 };
 
 static void __init sunxi_factors_clk_setup(struct device_node *node,
@@ -221,7 +318,7 @@
 	clk = clk_register_factors(NULL, clk_name, parent, 0, reg,
 				   data->table, data->getter, &clk_lock);
 
-	if (clk) {
+	if (!IS_ERR(clk)) {
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 		clk_register_clkdev(clk, clk_name, NULL);
 	}
@@ -239,11 +336,15 @@
 	u8 shift;
 };
 
-static const __initconst struct mux_data cpu_mux_data = {
+static const struct mux_data sun4i_cpu_mux_data __initconst = {
 	.shift = 16,
 };
 
-static const __initconst struct mux_data apb1_mux_data = {
+static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
+	.shift = 12,
+};
+
+static const struct mux_data sun4i_apb1_mux_data __initconst = {
 	.shift = 24,
 };
 
@@ -261,7 +362,8 @@
 	while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
 		i++;
 
-	clk = clk_register_mux(NULL, clk_name, parents, i, 0, reg,
+	clk = clk_register_mux(NULL, clk_name, parents, i,
+			       CLK_SET_RATE_NO_REPARENT, reg,
 			       data->shift, SUNXI_MUX_GATE_WIDTH,
 			       0, &clk_lock);
 
@@ -277,26 +379,34 @@
  * sunxi_divider_clk_setup() - Setup function for simple divider clocks
  */
 
-#define SUNXI_DIVISOR_WIDTH	2
-
 struct div_data {
-	u8 shift;
-	u8 pow;
+	u8	shift;
+	u8	pow;
+	u8	width;
 };
 
-static const __initconst struct div_data axi_data = {
-	.shift = 0,
-	.pow = 0,
+static const struct div_data sun4i_axi_data __initconst = {
+	.shift	= 0,
+	.pow	= 0,
+	.width	= 2,
 };
 
-static const __initconst struct div_data ahb_data = {
-	.shift = 4,
-	.pow = 1,
+static const struct div_data sun4i_ahb_data __initconst = {
+	.shift	= 4,
+	.pow	= 1,
+	.width	= 2,
 };
 
-static const __initconst struct div_data apb0_data = {
-	.shift = 8,
-	.pow = 1,
+static const struct div_data sun4i_apb0_data __initconst = {
+	.shift	= 8,
+	.pow	= 1,
+	.width	= 2,
+};
+
+static const struct div_data sun6i_a31_apb2_div_data __initconst = {
+	.shift	= 0,
+	.pow	= 0,
+	.width	= 4,
 };
 
 static void __init sunxi_divider_clk_setup(struct device_node *node,
@@ -312,7 +422,7 @@
 	clk_parent = of_clk_get_parent_name(node, 0);
 
 	clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
-				   reg, data->shift, SUNXI_DIVISOR_WIDTH,
+				   reg, data->shift, data->width,
 				   data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
 				   &clk_lock);
 	if (clk) {
@@ -333,34 +443,70 @@
 	DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
 };
 
-static const __initconst struct gates_data sun4i_axi_gates_data = {
+static const struct gates_data sun4i_axi_gates_data __initconst = {
 	.mask = {1},
 };
 
-static const __initconst struct gates_data sun4i_ahb_gates_data = {
+static const struct gates_data sun4i_ahb_gates_data __initconst = {
 	.mask = {0x7F77FFF, 0x14FB3F},
 };
 
-static const __initconst struct gates_data sun5i_a13_ahb_gates_data = {
+static const struct gates_data sun5i_a10s_ahb_gates_data __initconst = {
+	.mask = {0x147667e7, 0x185915},
+};
+
+static const struct gates_data sun5i_a13_ahb_gates_data __initconst = {
 	.mask = {0x107067e7, 0x185111},
 };
 
-static const __initconst struct gates_data sun4i_apb0_gates_data = {
+static const struct gates_data sun6i_a31_ahb1_gates_data __initconst = {
+	.mask = {0xEDFE7F62, 0x794F931},
+};
+
+static const struct gates_data sun7i_a20_ahb_gates_data __initconst = {
+	.mask = { 0x12f77fff, 0x16ff3f },
+};
+
+static const struct gates_data sun4i_apb0_gates_data __initconst = {
 	.mask = {0x4EF},
 };
 
-static const __initconst struct gates_data sun5i_a13_apb0_gates_data = {
+static const struct gates_data sun5i_a10s_apb0_gates_data __initconst = {
+	.mask = {0x469},
+};
+
+static const struct gates_data sun5i_a13_apb0_gates_data __initconst = {
 	.mask = {0x61},
 };
 
-static const __initconst struct gates_data sun4i_apb1_gates_data = {
+static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
+	.mask = { 0x4ff },
+};
+
+static const struct gates_data sun4i_apb1_gates_data __initconst = {
 	.mask = {0xFF00F7},
 };
 
-static const __initconst struct gates_data sun5i_a13_apb1_gates_data = {
+static const struct gates_data sun5i_a10s_apb1_gates_data __initconst = {
+	.mask = {0xf0007},
+};
+
+static const struct gates_data sun5i_a13_apb1_gates_data __initconst = {
 	.mask = {0xa0007},
 };
 
+static const struct gates_data sun6i_a31_apb1_gates_data __initconst = {
+	.mask = {0x3031},
+};
+
+static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
+	.mask = {0x3F000F},
+};
+
+static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
+	.mask = { 0xff80ff },
+};
+
 static void __init sunxi_gates_clk_setup(struct device_node *node,
 					 struct gates_data *data)
 {
@@ -410,43 +556,49 @@
 	of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 }
 
-/* Matches for of_clk_init */
-static const __initconst struct of_device_id clk_match[] = {
-	{.compatible = "allwinner,sun4i-osc-clk", .data = sunxi_osc_clk_setup,},
-	{}
-};
-
 /* Matches for factors clocks */
-static const __initconst struct of_device_id clk_factors_match[] = {
-	{.compatible = "allwinner,sun4i-pll1-clk", .data = &pll1_data,},
-	{.compatible = "allwinner,sun4i-apb1-clk", .data = &apb1_data,},
+static const struct of_device_id clk_factors_match[] __initconst = {
+	{.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,},
+	{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
+	{.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,},
 	{}
 };
 
 /* Matches for divider clocks */
-static const __initconst struct of_device_id clk_div_match[] = {
-	{.compatible = "allwinner,sun4i-axi-clk", .data = &axi_data,},
-	{.compatible = "allwinner,sun4i-ahb-clk", .data = &ahb_data,},
-	{.compatible = "allwinner,sun4i-apb0-clk", .data = &apb0_data,},
+static const struct of_device_id clk_div_match[] __initconst = {
+	{.compatible = "allwinner,sun4i-axi-clk", .data = &sun4i_axi_data,},
+	{.compatible = "allwinner,sun4i-ahb-clk", .data = &sun4i_ahb_data,},
+	{.compatible = "allwinner,sun4i-apb0-clk", .data = &sun4i_apb0_data,},
+	{.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
 	{}
 };
 
 /* Matches for mux clocks */
-static const __initconst struct of_device_id clk_mux_match[] = {
-	{.compatible = "allwinner,sun4i-cpu-clk", .data = &cpu_mux_data,},
-	{.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &apb1_mux_data,},
+static const struct of_device_id clk_mux_match[] __initconst = {
+	{.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,},
+	{.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
+	{.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
 	{}
 };
 
 /* Matches for gate clocks */
-static const __initconst struct of_device_id clk_gates_match[] = {
+static const struct of_device_id clk_gates_match[] __initconst = {
 	{.compatible = "allwinner,sun4i-axi-gates-clk", .data = &sun4i_axi_gates_data,},
 	{.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
+	{.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
 	{.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
+	{.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
+	{.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
 	{.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
+	{.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
 	{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
+	{.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
 	{.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
+	{.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
 	{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
+	{.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
+	{.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
+	{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
 	{}
 };
 
@@ -467,8 +619,8 @@
 
 void __init sunxi_init_clocks(void)
 {
-	/* Register all the simple sunxi clocks on DT */
-	of_clk_init(clk_match);
+	/* Register all the simple and basic clocks on DT */
+	of_clk_init(NULL);
 
 	/* Register factor clocks */
 	of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 806d803..9467da7 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1566,7 +1566,8 @@
 
 	/* audio0 */
 	clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
-			       ARRAY_SIZE(mux_audio_sync_clk), 0,
+			       ARRAY_SIZE(mux_audio_sync_clk),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0,
 			       NULL);
 	clks[audio0_mux] = clk;
@@ -1578,7 +1579,8 @@
 
 	/* audio1 */
 	clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
-			       ARRAY_SIZE(mux_audio_sync_clk), 0,
+			       ARRAY_SIZE(mux_audio_sync_clk),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0,
 			       NULL);
 	clks[audio1_mux] = clk;
@@ -1590,7 +1592,8 @@
 
 	/* audio2 */
 	clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
-			       ARRAY_SIZE(mux_audio_sync_clk), 0,
+			       ARRAY_SIZE(mux_audio_sync_clk),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0,
 			       NULL);
 	clks[audio2_mux] = clk;
@@ -1602,7 +1605,8 @@
 
 	/* audio3 */
 	clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
-			       ARRAY_SIZE(mux_audio_sync_clk), 0,
+			       ARRAY_SIZE(mux_audio_sync_clk),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0,
 			       NULL);
 	clks[audio3_mux] = clk;
@@ -1614,7 +1618,8 @@
 
 	/* audio4 */
 	clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
-			       ARRAY_SIZE(mux_audio_sync_clk), 0,
+			       ARRAY_SIZE(mux_audio_sync_clk),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0,
 			       NULL);
 	clks[audio4_mux] = clk;
@@ -1626,7 +1631,8 @@
 
 	/* spdif */
 	clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
-			       ARRAY_SIZE(mux_audio_sync_clk), 0,
+			       ARRAY_SIZE(mux_audio_sync_clk),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0,
 			       NULL);
 	clks[spdif_mux] = clk;
@@ -1721,7 +1727,8 @@
 
 	/* clk_out_1 */
 	clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
-			       ARRAY_SIZE(clk_out1_parents), 0,
+			       ARRAY_SIZE(clk_out1_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
 			       &clk_out_lock);
 	clks[clk_out_1_mux] = clk;
@@ -1733,7 +1740,8 @@
 
 	/* clk_out_2 */
 	clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
-			       ARRAY_SIZE(clk_out2_parents), 0,
+			       ARRAY_SIZE(clk_out2_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
 			       &clk_out_lock);
 	clks[clk_out_2_mux] = clk;
@@ -1745,7 +1753,8 @@
 
 	/* clk_out_3 */
 	clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
-			       ARRAY_SIZE(clk_out3_parents), 0,
+			       ARRAY_SIZE(clk_out3_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
 			       &clk_out_lock);
 	clks[clk_out_3_mux] = clk;
@@ -2063,7 +2072,8 @@
 
 	/* dsia */
 	clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
-			       ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+			       ARRAY_SIZE(mux_plld_out0_plld2_out0),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
 	clks[dsia_mux] = clk;
 	clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
@@ -2073,7 +2083,8 @@
 
 	/* dsib */
 	clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
-			       ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+			       ARRAY_SIZE(mux_plld_out0_plld2_out0),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
 	clks[dsib_mux] = clk;
 	clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
@@ -2110,7 +2121,8 @@
 
 	/* emc */
 	clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
-			       ARRAY_SIZE(mux_pllmcp_clkm), 0,
+			       ARRAY_SIZE(mux_pllmcp_clkm),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + CLK_SOURCE_EMC,
 			       29, 3, 0, NULL);
 	clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base,
@@ -2194,7 +2206,7 @@
  * dfll_soc/dfll_ref apparently must be kept enabled, otherwise I2C5
  * breaks
  */
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
 	{uarta, pll_p, 408000000, 0},
 	{uartb, pll_p, 408000000, 0},
 	{uartc, pll_p, 408000000, 0},
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 759ca47..056f649 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -778,7 +778,8 @@
 
 	/* audio */
 	clk = clk_register_mux(NULL, "audio_mux", audio_parents,
-				ARRAY_SIZE(audio_parents), 0,
+				ARRAY_SIZE(audio_parents),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
 				clk_base + AUDIO_SYNC_CLK, 4,
@@ -941,7 +942,8 @@
 
 	/* emc */
 	clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
-			       ARRAY_SIZE(mux_pllmcp_clkm), 0,
+			       ARRAY_SIZE(mux_pllmcp_clkm),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + CLK_SOURCE_EMC,
 			       30, 2, 0, NULL);
 	clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
@@ -1223,7 +1225,7 @@
 #endif
 };
 
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
 	{pll_p, clk_max, 216000000, 1},
 	{pll_p_out1, clk_max, 28800000, 1},
 	{pll_p_out2, clk_max, 48000000, 1},
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index e2c6ca0..dbe7c80 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -971,7 +971,7 @@
 	/* PLLU */
 	clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
 			    0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
-			    TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+			    TEGRA_PLL_SET_LFCON,
 			    pll_u_freq_table,
 			    NULL);
 	clk_register_clkdev(clk, "pll_u", NULL);
@@ -1026,7 +1026,8 @@
 
 	/* PLLE */
 	clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
-			       ARRAY_SIZE(pll_e_parents), 0,
+			       ARRAY_SIZE(pll_e_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + PLLE_AUX, 2, 1, 0, NULL);
 	clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
 			     CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
@@ -1086,7 +1087,8 @@
 
 	/* audio0 */
 	clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
-				ARRAY_SIZE(mux_audio_sync_clk), 0,
+				ARRAY_SIZE(mux_audio_sync_clk),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
 				clk_base + AUDIO_SYNC_CLK_I2S0, 4,
@@ -1096,7 +1098,8 @@
 
 	/* audio1 */
 	clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
-				ARRAY_SIZE(mux_audio_sync_clk), 0,
+				ARRAY_SIZE(mux_audio_sync_clk),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
 				clk_base + AUDIO_SYNC_CLK_I2S1, 4,
@@ -1106,7 +1109,8 @@
 
 	/* audio2 */
 	clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
-				ARRAY_SIZE(mux_audio_sync_clk), 0,
+				ARRAY_SIZE(mux_audio_sync_clk),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
 				clk_base + AUDIO_SYNC_CLK_I2S2, 4,
@@ -1116,7 +1120,8 @@
 
 	/* audio3 */
 	clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
-				ARRAY_SIZE(mux_audio_sync_clk), 0,
+				ARRAY_SIZE(mux_audio_sync_clk),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
 				clk_base + AUDIO_SYNC_CLK_I2S3, 4,
@@ -1126,7 +1131,8 @@
 
 	/* audio4 */
 	clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
-				ARRAY_SIZE(mux_audio_sync_clk), 0,
+				ARRAY_SIZE(mux_audio_sync_clk),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
 				clk_base + AUDIO_SYNC_CLK_I2S4, 4,
@@ -1136,7 +1142,8 @@
 
 	/* spdif */
 	clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
-				ARRAY_SIZE(mux_audio_sync_clk), 0,
+				ARRAY_SIZE(mux_audio_sync_clk),
+				CLK_SET_RATE_NO_REPARENT,
 				clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
 	clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
 				clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
@@ -1229,7 +1236,8 @@
 
 	/* clk_out_1 */
 	clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
-			       ARRAY_SIZE(clk_out1_parents), 0,
+			       ARRAY_SIZE(clk_out1_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
 			       &clk_out_lock);
 	clks[clk_out_1_mux] = clk;
@@ -1241,7 +1249,8 @@
 
 	/* clk_out_2 */
 	clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
-			       ARRAY_SIZE(clk_out2_parents), 0,
+			       ARRAY_SIZE(clk_out2_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
 			       &clk_out_lock);
 	clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
@@ -1252,7 +1261,8 @@
 
 	/* clk_out_3 */
 	clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
-			       ARRAY_SIZE(clk_out3_parents), 0,
+			       ARRAY_SIZE(clk_out3_parents),
+			       CLK_SET_RATE_NO_REPARENT,
 			       pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
 			       &clk_out_lock);
 	clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
@@ -1679,7 +1689,8 @@
 
 	/* emc */
 	clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
-			       ARRAY_SIZE(mux_pllmcp_clkm), 0,
+			       ARRAY_SIZE(mux_pllmcp_clkm),
+			       CLK_SET_RATE_NO_REPARENT,
 			       clk_base + CLK_SOURCE_EMC,
 			       30, 2, 0, NULL);
 	clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
@@ -1901,7 +1912,7 @@
 #endif
 };
 
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
 	{uarta, pll_p, 408000000, 0},
 	{uartb, pll_p, 408000000, 0},
 	{uartc, pll_p, 408000000, 0},
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index a4a728d..2d5e1b4 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -37,8 +37,8 @@
 		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
 
 		vexpress_sp810_timerclken[i] = clk_register_mux(NULL, name,
-				parents, 2, 0, base + SCCTRL,
-				SCCTRL_TIMERENnSEL_SHIFT(i), 1,
+				parents, 2, CLK_SET_RATE_NO_REPARENT,
+				base + SCCTRL, SCCTRL_TIMERENnSEL_SHIFT(i), 1,
 				0, &vexpress_sp810_lock);
 
 		if (WARN_ON(IS_ERR(vexpress_sp810_timerclken[i])))
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 089d3e3..cc40fe6 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -125,8 +125,9 @@
 	div0_name = kasprintf(GFP_KERNEL, "%s_div0", clk_name);
 	div1_name = kasprintf(GFP_KERNEL, "%s_div1", clk_name);
 
-	clk = clk_register_mux(NULL, mux_name, parents, 4, 0,
-			fclk_ctrl_reg, 4, 2, 0, fclk_lock);
+	clk = clk_register_mux(NULL, mux_name, parents, 4,
+			CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
+			fclk_lock);
 
 	clk = clk_register_divider(NULL, div0_name, mux_name,
 			0, fclk_ctrl_reg, 8, 6, CLK_DIVIDER_ONE_BASED |
@@ -168,8 +169,8 @@
 	mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name0);
 	div_name = kasprintf(GFP_KERNEL, "%s_div", clk_name0);
 
-	clk = clk_register_mux(NULL, mux_name, parents, 4, 0,
-			clk_ctrl, 4, 2, 0, lock);
+	clk = clk_register_mux(NULL, mux_name, parents, 4,
+			CLK_SET_RATE_NO_REPARENT, clk_ctrl, 4, 2, 0, lock);
 
 	clk = clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
 			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, lock);
@@ -236,25 +237,26 @@
 	clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
 			SLCR_PLL_STATUS, 0, &armpll_lock);
 	clks[armpll] = clk_register_mux(NULL, clk_output_name[armpll],
-			armpll_parents, 2, 0, SLCR_ARMPLL_CTRL, 4, 1, 0,
-			&armpll_lock);
+			armpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+			SLCR_ARMPLL_CTRL, 4, 1, 0, &armpll_lock);
 
 	clk = clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
 			SLCR_PLL_STATUS, 1, &ddrpll_lock);
 	clks[ddrpll] = clk_register_mux(NULL, clk_output_name[ddrpll],
-			ddrpll_parents, 2, 0, SLCR_DDRPLL_CTRL, 4, 1, 0,
-			&ddrpll_lock);
+			ddrpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+			SLCR_DDRPLL_CTRL, 4, 1, 0, &ddrpll_lock);
 
 	clk = clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
 			SLCR_PLL_STATUS, 2, &iopll_lock);
 	clks[iopll] = clk_register_mux(NULL, clk_output_name[iopll],
-			iopll_parents, 2, 0, SLCR_IOPLL_CTRL, 4, 1, 0,
-			&iopll_lock);
+			iopll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+			SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
 
 	/* CPU clocks */
 	tmp = readl(SLCR_621_TRUE) & 1;
-	clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4, 0,
-			SLCR_ARM_CLK_CTRL, 4, 2, 0, &armclk_lock);
+	clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
+			CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
+			&armclk_lock);
 	clk = clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
 			SLCR_ARM_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
 			CLK_DIVIDER_ALLOW_ZERO, &armclk_lock);
@@ -293,8 +295,9 @@
 			swdt_ext_clk_mux_parents[i + 1] = dummy_nm;
 	}
 	clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
-			swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
-			SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
+			swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT |
+			CLK_SET_RATE_NO_REPARENT, SLCR_SWDT_CLK_SEL, 0, 1, 0,
+			&swdtclk_lock);
 
 	/* DDR clocks */
 	clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -356,8 +359,9 @@
 			gem0_mux_parents[i + 1] = of_clk_get_parent_name(np,
 					idx);
 	}
-	clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4, 0,
-			SLCR_GEM0_CLK_CTRL, 4, 2, 0, &gem0clk_lock);
+	clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
+			CLK_SET_RATE_NO_REPARENT, SLCR_GEM0_CLK_CTRL, 4, 2, 0,
+			&gem0clk_lock);
 	clk = clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
 			SLCR_GEM0_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
 			CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock);
@@ -366,7 +370,8 @@
 			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
 			&gem0clk_lock);
 	clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
-			CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+			SLCR_GEM0_CLK_CTRL, 6, 1, 0,
 			&gem0clk_lock);
 	clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
 			"gem0_emio_mux", CLK_SET_RATE_PARENT,
@@ -379,8 +384,9 @@
 			gem1_mux_parents[i + 1] = of_clk_get_parent_name(np,
 					idx);
 	}
-	clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4, 0,
-			SLCR_GEM1_CLK_CTRL, 4, 2, 0, &gem1clk_lock);
+	clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
+			CLK_SET_RATE_NO_REPARENT, SLCR_GEM1_CLK_CTRL, 4, 2, 0,
+			&gem1clk_lock);
 	clk = clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
 			SLCR_GEM1_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
 			CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock);
@@ -389,7 +395,8 @@
 			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
 			&gem1clk_lock);
 	clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
-			CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+			SLCR_GEM1_CLK_CTRL, 6, 1, 0,
 			&gem1clk_lock);
 	clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
 			"gem1_emio_mux", CLK_SET_RATE_PARENT,
@@ -409,8 +416,9 @@
 			can_mio_mux_parents[i] = dummy_nm;
 	}
 	kfree(clk_name);
-	clk = clk_register_mux(NULL, "can_mux", periph_parents, 4, 0,
-			SLCR_CAN_CLK_CTRL, 4, 2, 0, &canclk_lock);
+	clk = clk_register_mux(NULL, "can_mux", periph_parents, 4,
+			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
+			&canclk_lock);
 	clk = clk_register_divider(NULL, "can_div0", "can_mux", 0,
 			SLCR_CAN_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
 			CLK_DIVIDER_ALLOW_ZERO, &canclk_lock);
@@ -425,17 +433,21 @@
 			CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 1, 0,
 			&canclk_lock);
 	clk = clk_register_mux(NULL, "can0_mio_mux",
-			can_mio_mux_parents, 54, CLK_SET_RATE_PARENT,
-			SLCR_CAN_MIOCLK_CTRL, 0, 6, 0, &canmioclk_lock);
+			can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
+			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 0, 6, 0,
+			&canmioclk_lock);
 	clk = clk_register_mux(NULL, "can1_mio_mux",
-			can_mio_mux_parents, 54, CLK_SET_RATE_PARENT,
-			SLCR_CAN_MIOCLK_CTRL, 16, 6, 0, &canmioclk_lock);
+			can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
+			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 16, 6,
+			0, &canmioclk_lock);
 	clks[can0] = clk_register_mux(NULL, clk_output_name[can0],
-			can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT,
-			SLCR_CAN_MIOCLK_CTRL, 6, 1, 0, &canmioclk_lock);
+			can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
+			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 6, 1, 0,
+			&canmioclk_lock);
 	clks[can1] = clk_register_mux(NULL, clk_output_name[can1],
-			can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT,
-			SLCR_CAN_MIOCLK_CTRL, 22, 1, 0, &canmioclk_lock);
+			can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
+			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 22, 1,
+			0, &canmioclk_lock);
 
 	for (i = 0; i < ARRAY_SIZE(dbgtrc_emio_input_names); i++) {
 		int idx = of_property_match_string(np, "clock-names",
@@ -444,13 +456,15 @@
 			dbg_emio_mux_parents[i + 1] = of_clk_get_parent_name(np,
 					idx);
 	}
-	clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4, 0,
-			SLCR_DBG_CLK_CTRL, 4, 2, 0, &dbgclk_lock);
+	clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
+			CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 4, 2, 0,
+			&dbgclk_lock);
 	clk = clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
 			SLCR_DBG_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
 			CLK_DIVIDER_ALLOW_ZERO, &dbgclk_lock);
-	clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2, 0,
-			SLCR_DBG_CLK_CTRL, 6, 1, 0, &dbgclk_lock);
+	clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
+			CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 6, 1, 0,
+			&dbgclk_lock);
 	clks[dbg_trc] = clk_register_gate(NULL, clk_output_name[dbg_trc],
 			"dbg_emio_mux", CLK_SET_RATE_PARENT, SLCR_DBG_CLK_CTRL,
 			0, 0, &dbgclk_lock);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 47e307c..3226f54 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -50,6 +50,9 @@
 #define PLLCTRL_RESET_MASK	1
 #define PLLCTRL_RESET_SHIFT	0
 
+#define PLL_FBDIV_MIN	13
+#define PLL_FBDIV_MAX	66
+
 /**
  * zynq_pll_round_rate() - Round a clock frequency
  * @hw:		Handle between common and hardware-specific interfaces
@@ -63,10 +66,10 @@
 	u32 fbdiv;
 
 	fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
-	if (fbdiv < 13)
-		fbdiv = 13;
-	else if (fbdiv > 66)
-		fbdiv = 66;
+	if (fbdiv < PLL_FBDIV_MIN)
+		fbdiv = PLL_FBDIV_MIN;
+	else if (fbdiv > PLL_FBDIV_MAX)
+		fbdiv = PLL_FBDIV_MAX;
 
 	return *prate * fbdiv;
 }
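
A worked example, not from the patch, assuming the common 33.333 MHz Zynq ps_clk as *prate: a request for 1 GHz yields fbdiv = DIV_ROUND_CLOSEST(1000000000, 33333333) = 30, inside [13, 66], so the PLL rounds to roughly 1 GHz; a request for 100 MHz yields fbdiv = 3, which is clamped up to PLL_FBDIV_MIN = 13, so roughly 433 MHz is the lowest rate this PLL will report.
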
@@ -182,7 +185,13 @@
 
 /**
  * clk_register_zynq_pll() - Register PLL with the clock framework
- * @np	Pointer to the DT device node
+ * @name:	PLL name
+ * @parent:	Parent clock name
+ * @pll_ctrl:	Pointer to PLL control register
+ * @pll_status:	Pointer to PLL status register
+ * @lock_index:	Bit index to this PLL's lock status bit in @pll_status
+ * @lock:	Register lock
+ * Returns handle to the registered clock.
  */
 struct clk *clk_register_zynq_pll(const char *name, const char *parent,
 		void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index ac60f8b..ab29476 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -368,10 +368,6 @@
 
 static void __init samsung_timer_resources(void)
 {
-	pwm.timerclk = clk_get(NULL, "timers");
-	if (IS_ERR(pwm.timerclk))
-		panic("failed to get timers clock for timer");
-
 	clk_prepare_enable(pwm.timerclk);
 
 	pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
@@ -416,6 +412,10 @@
 	memcpy(&pwm.variant, variant, sizeof(pwm.variant));
 	memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));
 
+	pwm.timerclk = clk_get(NULL, "timers");
+	if (IS_ERR(pwm.timerclk))
+		panic("failed to get timers clock for timer");
+
 	_samsung_pwm_clocksource_init();
 }
 
@@ -447,6 +447,10 @@
 		return;
 	}
 
+	pwm.timerclk = of_clk_get_by_name(np, "timers");
+	if (IS_ERR(pwm.timerclk))
+		panic("failed to get timers clock for timer");
+
 	_samsung_pwm_clocksource_init();
 }
 
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index b330219..8e36603 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -27,3 +27,13 @@
 	help
 	  Select this to enable cpuidle for ST-E u8500 processors
 
+config CPU_IDLE_BIG_LITTLE
+	bool "Support for ARM big.LITTLE processors"
+	depends on ARCH_VEXPRESS_TC2_PM
+	select ARM_CPU_SUSPEND
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	help
+	  Select this option to enable the CPU idle driver for big.LITTLE
+	  based ARM systems. The driver manages CPU coordination through
+	  MCPM and defines different C-states for little and big cores
+	  through the multiple CPU idle drivers infrastructure.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 0b9d200..cea5ef5 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -11,3 +11,4 @@
 obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE)	+= cpuidle-kirkwood.o
 obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)		+= cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
+obj-$(CONFIG_CPU_IDLE_BIG_LITTLE)	+= cpuidle-big_little.o
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
new file mode 100644
index 0000000..b45fc62
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 ARM/Linaro
+ *
+ * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *          Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *          Nicolas Pitre <nicolas.pitre@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpuidle.h>
+#include <asm/mcpm.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+			      struct cpuidle_driver *drv, int idx);
+
+/*
+ * NB: Owing to current menu governor behaviour, the big and LITTLE
+ * index 1 states have to define exit_latency and target_residency for
+ * the cluster state since, when all CPUs in a cluster hit it, the
+ * cluster can be shut down. This means that when a single CPU enters
+ * this state the exit_latency and target_residency values are somewhat
+ * overkill.
+ * There is no notion of cluster states in the menu governor, so CPUs
+ * have to define CPU states where possibly the cluster will be shut
+ * down depending on the state of other CPUs. Idle state entry and exit
+ * happen at random times; however the cluster state provides
+ * target_residency values as if all CPUs in a cluster entered the
+ * state at once; this is somewhat optimistic and the behaviour should
+ * be fixed either in the governor or in the MCPM back-ends.
+ * To make this driver 100% generic, the number of states and the
+ * exit_latency and target_residency values must be obtained from
+ * device tree bindings.
+ *
+ * exit_latency: refers to the TC2 vexpress test chip and depends on the
+ * current cluster operating point. It is the time it takes to get the CPU
+ * up and running when the CPU is powered up on cluster wake-up from shutdown.
+ * Current values for big and LITTLE clusters are provided for clusters
+ * running at default operating points.
+ *
+ * target_residency: the minimum amount of time the cluster has
+ * to be down to break even in terms of power consumption. Cluster
+ * shutdown has inherent dynamic power costs (L2 writebacks to DRAM
+ * being the main factor) that depend on the current operating points.
+ * The current values for both clusters assume that half of the L2
+ * lines are dirty and require cleaning to DRAM, and take into account
+ * leakage static power values related to the vexpress TC2 testchip.
+ */
+static struct cpuidle_driver bl_idle_little_driver = {
+	.name = "little_idle",
+	.owner = THIS_MODULE,
+	.states[0] = ARM_CPUIDLE_WFI_STATE,
+	.states[1] = {
+		.enter			= bl_enter_powerdown,
+		.exit_latency		= 700,
+		.target_residency	= 2500,
+		.flags			= CPUIDLE_FLAG_TIME_VALID |
+					  CPUIDLE_FLAG_TIMER_STOP,
+		.name			= "C1",
+		.desc			= "ARM little-cluster power down",
+	},
+	.state_count = 2,
+};
+
+static struct cpuidle_driver bl_idle_big_driver = {
+	.name = "big_idle",
+	.owner = THIS_MODULE,
+	.states[0] = ARM_CPUIDLE_WFI_STATE,
+	.states[1] = {
+		.enter			= bl_enter_powerdown,
+		.exit_latency		= 500,
+		.target_residency	= 2000,
+		.flags			= CPUIDLE_FLAG_TIME_VALID |
+					  CPUIDLE_FLAG_TIMER_STOP,
+		.name			= "C1",
+		.desc			= "ARM big-cluster power down",
+	},
+	.state_count = 2,
+};
+
+/*
+ * notrace prevents trace shims from getting inserted where they
+ * should not. Global jumps and ldrex/strex must not be inserted
+ * in power down sequences where caches and MMU may be turned off.
+ */
+static int notrace bl_powerdown_finisher(unsigned long arg)
+{
+	/* MCPM works with HW CPU identifiers */
+	unsigned int mpidr = read_cpuid_mpidr();
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
+	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+
+	/*
+	 * Residency value passed to mcpm_cpu_suspend back-end
+	 * has to be given clear semantics. Set to 0 as a
+	 * temporary value.
+	 */
+	mcpm_cpu_suspend(0);
+
+	/* return value != 0 means failure */
+	return 1;
+}
+
+/**
+ * bl_enter_powerdown - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver for this CPU
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int idx)
+{
+	cpu_pm_enter();
+
+	cpu_suspend(0, bl_powerdown_finisher);
+
+	/* signals the MCPM core that CPU is out of low power state */
+	mcpm_cpu_powered_up();
+
+	cpu_pm_exit();
+
+	return idx;
+}
+
+static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
+{
+	struct cpuinfo_arm *cpu_info;
+	struct cpumask *cpumask;
+	unsigned long cpuid;
+	int cpu;
+
+	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
+	if (!cpumask)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		cpu_info = &per_cpu(cpu_data, cpu);
+		cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id();
+
+		/* compare the CPU ID part number field */
+		if ((cpuid & 0xFFF0) == cpu_id)
+			cpumask_set_cpu(cpu, cpumask);
+	}
+
+	drv->cpumask = cpumask;
+
+	return 0;
+}
+
+static int __init bl_idle_init(void)
+{
+	int ret;
+
+	/*
+	 * Initialize the driver only on compatible machines
+	 */
+	if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
+		return -ENODEV;
+	/*
+	 * For now the differentiation between little and big cores
+	 * is based on the part number. A7 cores are considered little
+	 * cores, A15 are considered big cores. This distinction may
+	 * evolve in the future with a more generic matching approach.
+	 */
+	ret = bl_idle_driver_init(&bl_idle_little_driver,
+				  ARM_CPU_PART_CORTEX_A7);
+	if (ret)
+		return ret;
+
+	ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
+	if (ret)
+		goto out_uninit_little;
+
+	ret = cpuidle_register(&bl_idle_little_driver, NULL);
+	if (ret)
+		goto out_uninit_big;
+
+	ret = cpuidle_register(&bl_idle_big_driver, NULL);
+	if (ret)
+		goto out_unregister_little;
+
+	return 0;
+
+out_unregister_little:
+	cpuidle_unregister(&bl_idle_little_driver);
+out_uninit_big:
+	kfree(bl_idle_big_driver.cpumask);
+out_uninit_little:
+	kfree(bl_idle_little_driver.cpumask);
+
+	return ret;
+}
+device_initcall(bl_idle_init);
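
The little/big split above keys off the part-number field of the CPU ID register. A minimal sketch of the match, assuming the cputype.h definitions of this era, in which ARM_CPU_PART_CORTEX_A7 and ARM_CPU_PART_CORTEX_A15 hold the already-shifted MIDR[15:4] values (0xC070 and 0xC0F0) that the 0xFFF0 mask isolates; cpu_is_little() is a hypothetical helper, not part of the driver:

	static bool cpu_is_little(unsigned long cpuid)
	{
		/* MIDR bits [15:4] carry the primary part number */
		return (cpuid & 0xFFF0) == ARM_CPU_PART_CORTEX_A7;
	}
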
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index daa4da2..526ec77 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,6 +308,15 @@
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config K3_DMA
+	tristate "Hisilicon K3 DMA support"
+	depends on ARCH_HI3xxx
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the DMA engine for Hisilicon K3 platform
+	  devices.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6d62ec3..db89035 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -40,3 +40,4 @@
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5a18f82..e69b03c 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,6 @@
 	struct list_head resource_list;
 	struct resource_list_entry *rentry;
 	resource_size_t mem = 0, irq = 0;
-	u32 vendor_id;
 	int ret;
 
 	if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
@@ -73,9 +72,8 @@
 	if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
 		return 0;
 
-	vendor_id = le32_to_cpu(grp->vendor_id);
 	dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
-		(char *)&vendor_id, grp->device_id, grp->revision);
+		(char *)&grp->vendor_id, grp->device_id, grp->revision);
 
 	/* Check if the request line range is available */
 	if (si->base_request_line == 0 && si->num_handshake_signals == 0)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 06fe45c..fce46c5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
  *
  * Documentation: ARM DDI 0196G == PL080
  * Documentation: ARM DDI 0218E == PL081
+ * Documentation: S3C6410 User's Manual == PL080S
  *
  * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
  * channel.
@@ -36,6 +37,14 @@
  *
  * The PL080 has a dual bus master, PL081 has a single master.
  *
+ * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+ * It differs in the following aspects:
+ * - CH_CONFIG register at different offset,
+ * - separate CH_CONTROL2 register for transfer size,
+ * - bigger maximum transfer size,
+ * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+ * - no support for peripheral flow control.
+ *
  * Memory to peripheral transfer may be visualized as
  *	Get data from memory to DMAC
  *	Until no data left
@@ -64,10 +73,7 @@
  *  - Peripheral flow control: the transfer size is ignored (and should be
  *    zero).  The data is transferred from the current LLI entry, until
  *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
- *    will then move to the next LLI entry.
- *
- * Global TODO:
- * - Break out common code from arch/arm/mach-s3c64xx and share
+ *    will then move to the next LLI entry. Unsupported by PL080S.
  */
 #include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@
  * @nomadik: whether the channels have Nomadik security extension bits
  *	that need to be checked for permission before use and some registers are
  *	missing
+ * @pl080s: whether this version is a PL080S, which has a separate register
+ *	and LLI word for the transfer size.
  */
 struct vendor_data {
+	u8 config_offset;
 	u8 channels;
 	bool dualmaster;
 	bool nomadik;
-};
-
-/*
- * PL08X private data structures
- * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info is in cctl.  Also note that these
- * are fixed 32-bit quantities.
- */
-struct pl08x_lli {
-	u32 src;
-	u32 dst;
-	u32 lli;
-	u32 cctl;
+	bool pl080s;
+	u32 max_transfer_size;
 };
 
 /**
@@ -133,6 +131,8 @@
 	u8 buswidth;
 };
 
+#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
+
 /**
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
@@ -145,6 +145,7 @@
 struct pl08x_phy_chan {
 	unsigned int id;
 	void __iomem *base;
+	void __iomem *reg_config;
 	spinlock_t lock;
 	struct pl08x_dma_chan *serving;
 	bool locked;
@@ -174,12 +175,13 @@
  * @ccfg: config reg values for current txd
  * @done: this marks completed descriptors, which should not have their
  *   mux released.
+ * @cyclic: indicates a cyclic transfer
  */
 struct pl08x_txd {
 	struct virt_dma_desc vd;
 	struct list_head dsg_list;
 	dma_addr_t llis_bus;
-	struct pl08x_lli *llis_va;
+	u32 *llis_va;
 	/* Default cctl value for LLIs */
 	u32 cctl;
 	/*
@@ -188,6 +190,7 @@
 	 */
 	u32 ccfg;
 	bool done;
+	bool cyclic;
 };
 
 /**
@@ -263,17 +266,29 @@
 	struct dma_pool *pool;
 	u8 lli_buses;
 	u8 mem_buses;
+	u8 lli_words;
 };
 
 /*
  * PL08X specific defines
  */
 
-/* Size (bytes) of each LLI buffer allocated for one transfer */
-# define PL08X_LLI_TSFR_SIZE	0x2000
+/* The order of words in an LLI. */
+#define PL080_LLI_SRC		0
+#define PL080_LLI_DST		1
+#define PL080_LLI_LLI		2
+#define PL080_LLI_CCTL		3
+#define PL080S_LLI_CCTL2	4
 
-/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+/* Total words in an LLI. */
+#define PL080_LLI_WORDS		4
+#define PL080S_LLI_WORDS	8
+
+/*
+ * Number of LLIs in each LLI buffer allocated for one transfer
+ * (maximum times we call dma_pool_alloc on this pool without freeing)
+ */
+#define MAX_NUM_TSFR_LLIS	512
 #define PL08X_ALIGN		8
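
With llis_va now a bare u32 array, LLI number i starts at word i * lli_words, where lli_words is PL080_LLI_WORDS (4) on PL080/PL081 and PL080S_LLI_WORDS (8) on PL080S. A minimal sketch of the indexing, where pl08x_lli_ptr() is a hypothetical helper and not part of the patch:

	static u32 *pl08x_lli_ptr(u32 *llis_va, u8 lli_words, int i)
	{
		/* first word of LLI i, i.e. its PL080_LLI_SRC entry */
		return llis_va + i * lli_words;
	}
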
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -334,10 +349,39 @@
 {
 	unsigned int val;
 
-	val = readl(ch->base + PL080_CH_CONFIG);
+	val = readl(ch->reg_config);
 	return val & PL080_CONFIG_ACTIVE;
 }
 
+static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+{
+	if (pl08x->vd->pl080s)
+		dev_vdbg(&pl08x->adev->dev,
+			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+			lli[PL080S_LLI_CCTL2], ccfg);
+	else
+		dev_vdbg(&pl08x->adev->dev,
+			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+
+	writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+	writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+	writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+	writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+
+	if (pl08x->vd->pl080s)
+		writel_relaxed(lli[PL080S_LLI_CCTL2],
+				phychan->base + PL080S_CH_CONTROL2);
+
+	writel(ccfg, phychan->reg_config);
+}
+
 /*
  * Set the initial DMA register values i.e. those for the first LLI
  * The next LLI pointer and the configuration interrupt bit have
@@ -350,7 +394,6 @@
 	struct pl08x_phy_chan *phychan = plchan->phychan;
 	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
-	struct pl08x_lli *lli;
 	u32 val;
 
 	list_del(&txd->vd.node);
@@ -361,19 +404,7 @@
 	while (pl08x_phy_channel_busy(phychan))
 		cpu_relax();
 
-	lli = &txd->llis_va[0];
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
-		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
-		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
-		txd->ccfg);
-
-	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
-	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
-	writel(lli->lli, phychan->base + PL080_CH_LLI);
-	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
-	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
 
 	/* Enable the DMA channel */
 	/* Do not access config register until channel shows as disabled */
@@ -381,11 +412,11 @@
 		cpu_relax();
 
 	/* Do not access config register until channel shows as inactive */
-	val = readl(phychan->base + PL080_CH_CONFIG);
+	val = readl(phychan->reg_config);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(phychan->base + PL080_CH_CONFIG);
+		val = readl(phychan->reg_config);
 
-	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
 }
 
 /*
@@ -404,9 +435,9 @@
 	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	val = readl(ch->reg_config);
 	val |= PL080_CONFIG_HALT;
-	writel(val, ch->base + PL080_CH_CONFIG);
+	writel(val, ch->reg_config);
 
 	/* Wait for channel inactive */
 	for (timeout = 1000; timeout; timeout--) {
@@ -423,9 +454,9 @@
 	u32 val;
 
 	/* Clear the HALT bit */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	val = readl(ch->reg_config);
 	val &= ~PL080_CONFIG_HALT;
-	writel(val, ch->base + PL080_CH_CONFIG);
+	writel(val, ch->reg_config);
 }
 
 /*
@@ -437,12 +468,12 @@
 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
 	struct pl08x_phy_chan *ch)
 {
-	u32 val = readl(ch->base + PL080_CH_CONFIG);
+	u32 val = readl(ch->reg_config);
 
 	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
 	         PL080_CONFIG_TC_IRQ_MASK);
 
-	writel(val, ch->base + PL080_CH_CONFIG);
+	writel(val, ch->reg_config);
 
 	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
 	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -453,6 +484,28 @@
 	/* The source width defines the number of bytes */
 	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
 
+	cctl &= PL080_CONTROL_SWIDTH_MASK;
+
+	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+	case PL080_WIDTH_8BIT:
+		break;
+	case PL080_WIDTH_16BIT:
+		bytes *= 2;
+		break;
+	case PL080_WIDTH_32BIT:
+		bytes *= 4;
+		break;
+	}
+	return bytes;
+}
+
+static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+{
+	/* The source width defines the number of bytes */
+	u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+	cctl &= PL080_CONTROL_SWIDTH_MASK;
+
 	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
 	case PL080_WIDTH_8BIT:
 		break;
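
A worked example, not from the patch: a cctl word whose transfer-size field reads 512 and whose source-width field is PL080_WIDTH_32BIT describes 512 32-bit elements, so the helper returns 512 * 4 = 2048 bytes. On the PL080S the size instead comes from the separate cctl2 word masked with PL080S_CONTROL_TRANSFER_SIZE_MASK, which is what permits its larger maximum transfer size.
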
@@ -469,47 +522,66 @@
 /* The channel should be paused when calling this */
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
+	struct pl08x_driver_data *pl08x = plchan->host;
+	const u32 *llis_va, *llis_va_limit;
 	struct pl08x_phy_chan *ch;
+	dma_addr_t llis_bus;
 	struct pl08x_txd *txd;
-	size_t bytes = 0;
+	u32 llis_max_words;
+	size_t bytes;
+	u32 clli;
 
 	ch = plchan->phychan;
 	txd = plchan->at;
 
+	if (!ch || !txd)
+		return 0;
+
 	/*
 	 * Follow the LLIs to get the number of remaining
 	 * bytes in the currently active transaction.
 	 */
-	if (ch && txd) {
-		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+	clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the remaining bytes in the active transfer */
+	/* First get the remaining bytes in the active transfer */
+	if (pl08x->vd->pl080s)
+		bytes = get_bytes_in_cctl_pl080s(
+				readl(ch->base + PL080_CH_CONTROL),
+				readl(ch->base + PL080S_CH_CONTROL2));
+	else
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
-		if (clli) {
-			struct pl08x_lli *llis_va = txd->llis_va;
-			dma_addr_t llis_bus = txd->llis_bus;
-			int index;
+	if (!clli)
+		return bytes;
 
-			BUG_ON(clli < llis_bus || clli >= llis_bus +
-				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+	llis_va = txd->llis_va;
+	llis_bus = txd->llis_bus;
 
-			/*
-			 * Locate the next LLI - as this is an array,
-			 * it's simple maths to find.
-			 */
-			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+	BUG_ON(clli < llis_bus || clli >= llis_bus +
+						sizeof(u32) * llis_max_words);
 
-			for (; index < MAX_NUM_TSFR_LLIS; index++) {
-				bytes += get_bytes_in_cctl(llis_va[index].cctl);
+	/*
+	 * Locate the next LLI - as this is an array,
+	 * it's simple maths to find.
+	 */
+	llis_va += (clli - llis_bus) / sizeof(u32);
 
-				/*
-				 * A LLI pointer of 0 terminates the LLI list
-				 */
-				if (!llis_va[index].lli)
-					break;
-			}
-		}
+	llis_va_limit = llis_va + llis_max_words;
+
+	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+		if (pl08x->vd->pl080s)
+			bytes += get_bytes_in_cctl_pl080s(
+						llis_va[PL080_LLI_CCTL],
+						llis_va[PL080S_LLI_CCTL2]);
+		else
+			bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+
+		/*
+	 * An LLI pointer going backward terminates the LLI list
+		 */
+		if (llis_va[PL080_LLI_LLI] <= clli)
+			break;
 	}
 
 	return bytes;
@@ -720,6 +792,7 @@
 		break;
 	}
 
+	tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
 	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
 	return retbits;
 }
@@ -764,20 +837,26 @@
 /*
  * Fills in one LLI for a certain transfer descriptor and advance the counter
  */
-static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
-	int num_llis, int len, u32 cctl)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+				    struct pl08x_lli_build_data *bd,
+				    int num_llis, int len, u32 cctl, u32 cctl2)
 {
-	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	u32 offset = num_llis * pl08x->lli_words;
+	u32 *llis_va = bd->txd->llis_va + offset;
 	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
-	llis_va[num_llis].cctl = cctl;
-	llis_va[num_llis].src = bd->srcbus.addr;
-	llis_va[num_llis].dst = bd->dstbus.addr;
-	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
-		sizeof(struct pl08x_lli);
-	llis_va[num_llis].lli |= bd->lli_bus;
+	/* Advance the offset to next LLI. */
+	offset += pl08x->lli_words;
+
+	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+	llis_va[PL080_LLI_CCTL] = cctl;
+	if (pl08x->vd->pl080s)
+		llis_va[PL080S_LLI_CCTL2] = cctl2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
 		bd->srcbus.addr += len;
@@ -789,14 +868,53 @@
 	bd->remainder -= len;
 }
 
-static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
-		u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+			struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+			int num_llis, size_t *total_bytes)
 {
 	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
-	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
 	(*total_bytes) += len;
 }
 
+#ifdef VERBOSE_DEBUG
+static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+			   const u32 *llis_va, int num_llis)
+{
+	int i;
+
+	if (pl08x->vd->pl080s) {
+		dev_vdbg(&pl08x->adev->dev,
+			"%-3s %-9s  %-10s %-10s %-10s %-10s %s\n",
+			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+		for (i = 0; i < num_llis; i++) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, llis_va, llis_va[PL080_LLI_SRC],
+				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+				llis_va[PL080_LLI_CCTL],
+				llis_va[PL080S_LLI_CCTL2]);
+			llis_va += pl08x->lli_words;
+		}
+	} else {
+		dev_vdbg(&pl08x->adev->dev,
+			"%-3s %-9s  %-10s %-10s %-10s %s\n",
+			"lli", "", "csrc", "cdst", "clli", "cctl");
+		for (i = 0; i < num_llis; i++) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, llis_va, llis_va[PL080_LLI_SRC],
+				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+				llis_va[PL080_LLI_CCTL]);
+			llis_va += pl08x->lli_words;
+		}
+	}
+}
+#else
+static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+				  const u32 *llis_va, int num_llis) {}
+#endif
+
 /*
  * This fills in the table of LLIs for the transfer descriptor
  * Note that we assume we never have to change the burst sizes
@@ -810,7 +928,7 @@
 	int num_llis = 0;
 	u32 cctl, early_bytes = 0;
 	size_t max_bytes_per_lli, total_bytes;
-	struct pl08x_lli *llis_va;
+	u32 *llis_va, *last_lli;
 	struct pl08x_sg *dsg;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -845,10 +963,13 @@
 
 		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
-		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
-			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+		dev_vdbg(&pl08x->adev->dev,
+			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
+			(u64)bd.srcbus.addr,
+			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
 			bd.srcbus.buswidth,
-			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+			(u64)bd.dstbus.addr,
+			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
 			bd.dstbus.buswidth,
 			bd.remainder);
 		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
@@ -886,8 +1007,8 @@
 				return 0;
 			}
 
-			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
-					(bd.dstbus.addr % bd.dstbus.buswidth)) {
+			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
+				!IS_BUS_ALIGNED(&bd.dstbus)) {
 				dev_err(&pl08x->adev->dev,
 					"%s src & dst address must be aligned to src"
 					" & dst width if peripheral is flow controller",
@@ -897,7 +1018,8 @@
 
 			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
 					bd.dstbus.buswidth, 0);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+					0, cctl, 0);
 			break;
 		}
 
@@ -908,9 +1030,9 @@
 		 */
 		if (bd.remainder < mbus->buswidth)
 			early_bytes = bd.remainder;
-		else if ((mbus->addr) % (mbus->buswidth)) {
-			early_bytes = mbus->buswidth - (mbus->addr) %
-				(mbus->buswidth);
+		else if (!IS_BUS_ALIGNED(mbus)) {
+			early_bytes = mbus->buswidth -
+				(mbus->addr & (mbus->buswidth - 1));
 			if ((bd.remainder - early_bytes) < mbus->buswidth)
 				early_bytes = bd.remainder;
 		}
@@ -919,8 +1041,8 @@
 			dev_vdbg(&pl08x->adev->dev,
 				"%s byte width LLIs (remain 0x%08x)\n",
 				__func__, bd.remainder);
-			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
-				&total_bytes);
+			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+				num_llis++, &total_bytes);
 		}
 
 		if (bd.remainder) {
@@ -928,7 +1050,7 @@
 			 * Master now aligned
 			 * - if slave is not then we must set its width down
 			 */
-			if (sbus->addr % sbus->buswidth) {
+			if (!IS_BUS_ALIGNED(sbus)) {
 				dev_dbg(&pl08x->adev->dev,
 					"%s set down bus width to one byte\n",
 					__func__);
@@ -941,7 +1063,7 @@
 			 * MIN(buswidths)
 			 */
 			max_bytes_per_lli = bd.srcbus.buswidth *
-				PL080_CONTROL_TRANSFER_SIZE_MASK;
+						pl08x->vd->max_transfer_size;
 			dev_vdbg(&pl08x->adev->dev,
 				"%s max bytes per lli = %zu\n",
 				__func__, max_bytes_per_lli);
@@ -976,8 +1098,8 @@
 
 				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
 					bd.dstbus.buswidth, tsize);
-				pl08x_fill_lli_for_desc(&bd, num_llis++,
-						lli_len, cctl);
+				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+						lli_len, cctl, tsize);
 				total_bytes += lli_len;
 			}
 
@@ -988,8 +1110,8 @@
 				dev_vdbg(&pl08x->adev->dev,
 					"%s align with boundary, send odd bytes (remain %zu)\n",
 					__func__, bd.remainder);
-				prep_byte_width_lli(&bd, &cctl, bd.remainder,
-						num_llis++, &total_bytes);
+				prep_byte_width_lli(pl08x, &bd, &cctl,
+					bd.remainder, num_llis++, &total_bytes);
 			}
 		}
 
@@ -1003,33 +1125,25 @@
 		if (num_llis >= MAX_NUM_TSFR_LLIS) {
 			dev_err(&pl08x->adev->dev,
 				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-				__func__, (u32) MAX_NUM_TSFR_LLIS);
+				__func__, MAX_NUM_TSFR_LLIS);
 			return 0;
 		}
 	}
 
 	llis_va = txd->llis_va;
-	/* The final LLI terminates the LLI. */
-	llis_va[num_llis - 1].lli = 0;
-	/* The final LLI element shall also fire an interrupt. */
-	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
 
-#ifdef VERBOSE_DEBUG
-	{
-		int i;
-
-		dev_vdbg(&pl08x->adev->dev,
-			 "%-3s %-9s  %-10s %-10s %-10s %s\n",
-			 "lli", "", "csrc", "cdst", "clli", "cctl");
-		for (i = 0; i < num_llis; i++) {
-			dev_vdbg(&pl08x->adev->dev,
-				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				 i, &llis_va[i], llis_va[i].src,
-				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
-				);
-		}
+	if (txd->cyclic) {
+		/* Link back to the first LLI. */
+		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+	} else {
+		/* The final LLI terminates the LLI list. */
+		last_lli[PL080_LLI_LLI] = 0;
+		/* The final LLI element shall also fire an interrupt. */
+		last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
 	}
-#endif
+
+	pl08x_dump_lli(pl08x, llis_va, num_llis);
 
 	return num_llis;
 }
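
The cyclic/terminal split above is the crux of the new prep_dma_cyclic support: a cyclic descriptor chains its last LLI back to the first one instead of null-terminating it. A minimal standalone sketch of the two modes (the word indices and the IRQ bit are stand-ins for the driver's PL080_LLI_* and PL080_CONTROL_TC_IRQ_EN definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { LLI_SRC, LLI_DST, LLI_LLI, LLI_CCTL, LLI_WORDS };
    #define CCTL_TC_IRQ_EN (1u << 31)   /* stand-in for the real CCTL bit */

    static void terminate(uint32_t *llis, int n, uint32_t first_bus, bool cyclic)
    {
            uint32_t *last = llis + (n - 1) * LLI_WORDS;

            if (cyclic) {
                    last[LLI_LLI] = first_bus;        /* wrap into a ring */
            } else {
                    last[LLI_LLI] = 0;                /* NULL next-LLI halts */
                    last[LLI_CCTL] |= CCTL_TC_IRQ_EN; /* IRQ on completion */
            }
    }

    int main(void)
    {
            uint32_t llis[2 * LLI_WORDS] = { 0 };

            terminate(llis, 2, 0x80000000u, true);
            printf("last next-LLI = 0x%08x\n", llis[LLI_WORDS + LLI_LLI]);
            return 0;
    }
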
@@ -1305,6 +1419,7 @@
 				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
 
 	if (!plchan->slave)
 		return -EINVAL;
@@ -1314,6 +1429,13 @@
 	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
 
+	if (config->device_fc && pl08x->vd->pl080s) {
+		dev_err(&pl08x->adev->dev,
+			"%s: PL080S does not support peripheral flow control\n",
+			__func__);
+		return -EINVAL;
+	}
+
 	plchan->cfg = *config;
 
 	return 0;
@@ -1404,25 +1526,19 @@
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+		struct dma_chan *chan,
+		enum dma_transfer_direction direction,
+		dma_addr_t *slave_addr)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	struct pl08x_sg *dsg;
-	struct scatterlist *sg;
 	enum dma_slave_buswidth addr_width;
-	dma_addr_t slave_addr;
 	int ret, tmp;
 	u8 src_buses, dst_buses;
 	u32 maxburst, cctl;
 
-	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-			__func__, sg_dma_len(sgl), plchan->name);
-
 	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1436,14 +1552,14 @@
 	 */
 	if (direction == DMA_MEM_TO_DEV) {
 		cctl = PL080_CONTROL_SRC_INCR;
-		slave_addr = plchan->cfg.dst_addr;
+		*slave_addr = plchan->cfg.dst_addr;
 		addr_width = plchan->cfg.dst_addr_width;
 		maxburst = plchan->cfg.dst_maxburst;
 		src_buses = pl08x->mem_buses;
 		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
 		cctl = PL080_CONTROL_DST_INCR;
-		slave_addr = plchan->cfg.src_addr;
+		*slave_addr = plchan->cfg.src_addr;
 		addr_width = plchan->cfg.src_addr_width;
 		maxburst = plchan->cfg.src_maxburst;
 		src_buses = plchan->cd->periph_buses;
@@ -1492,24 +1608,107 @@
 	else
 		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 
+	return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+			   enum dma_transfer_direction direction,
+			   dma_addr_t slave_addr,
+			   dma_addr_t buf_addr,
+			   unsigned int len)
+{
+	struct pl08x_sg *dsg;
+
+	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+	if (!dsg)
+		return -ENOMEM;
+
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
+	dsg->len = len;
+	if (direction == DMA_MEM_TO_DEV) {
+		dsg->src_addr = buf_addr;
+		dsg->dst_addr = slave_addr;
+	} else {
+		dsg->src_addr = slave_addr;
+		dsg->dst_addr = buf_addr;
+	}
+
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	struct scatterlist *sg;
+	int ret, tmp;
+	dma_addr_t slave_addr;
+
+	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+			__func__, sg_dma_len(sgl), plchan->name);
+
+	txd = pl08x_init_txd(chan, direction, &slave_addr);
+	if (!txd)
+		return NULL;
+
 	for_each_sg(sgl, sg, sg_len, tmp) {
-		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
-		if (!dsg) {
+		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+				      sg_dma_address(sg),
+				      sg_dma_len(sg));
+		if (ret) {
 			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, txd);
 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
 					__func__);
 			return NULL;
 		}
-		list_add_tail(&dsg->node, &txd->dsg_list);
+	}
 
-		dsg->len = sg_dma_len(sg);
-		if (direction == DMA_MEM_TO_DEV) {
-			dsg->src_addr = sg_dma_address(sg);
-			dsg->dst_addr = slave_addr;
-		} else {
-			dsg->src_addr = slave_addr;
-			dsg->dst_addr = sg_dma_address(sg);
+	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+	if (!ret) {
+		pl08x_release_mux(plchan);
+		pl08x_free_txd(pl08x, txd);
+		return NULL;
+	}
+
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	int ret, tmp;
+	dma_addr_t slave_addr;
+
+	dev_dbg(&pl08x->adev->dev,
+		"%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+		__func__, period_len, buf_len,
+		direction == DMA_MEM_TO_DEV ? "to" : "from",
+		plchan->name);
+
+	txd = pl08x_init_txd(chan, direction, &slave_addr);
+	if (!txd)
+		return NULL;
+
+	txd->cyclic = true;
+	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+	for (tmp = 0; tmp < buf_len; tmp += period_len) {
+		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+				      buf_addr + tmp, period_len);
+		if (ret) {
+			pl08x_release_mux(plchan);
+			pl08x_free_txd(pl08x, txd);
+			return NULL;
 		}
 	}
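
The cyclic prep path reuses pl08x_tx_add_sg() by slicing the buffer into period_len pieces, one dsg per period. A trivial model of that loop (add_sg() is a hypothetical stand-in):

    #include <stddef.h>
    #include <stdio.h>

    /* hypothetical stand-in for pl08x_tx_add_sg() */
    static int add_sg(size_t offset, size_t len)
    {
            printf("dsg @%zu, len %zu\n", offset, len);
            return 0;
    }

    int main(void)
    {
            size_t buf_len = 4096, period_len = 1024, pos;

            for (pos = 0; pos < buf_len; pos += period_len)
                    if (add_sg(pos, period_len))
                            return 1; /* driver would free txd and mux here */
            return 0;
    }
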
 
@@ -1652,7 +1851,9 @@
 
 			spin_lock(&plchan->vc.lock);
 			tx = plchan->at;
-			if (tx) {
+			if (tx && tx->cyclic) {
+				vchan_cyclic_callback(&tx->vd);
+			} else if (tx) {
 				plchan->at = NULL;
 				/*
 				 * This descriptor is done, release its mux
@@ -1846,6 +2047,7 @@
 {
 	struct pl08x_driver_data *pl08x;
 	const struct vendor_data *vd = id->data;
+	u32 tsfr_size;
 	int ret = 0;
 	int i;
 
@@ -1873,6 +2075,7 @@
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
 	pl08x->slave.dev = &adev->dev;
 	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
 	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1880,6 +2083,7 @@
 	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
 	pl08x->slave.device_issue_pending = pl08x_issue_pending;
 	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
 	pl08x->slave.device_control = pl08x_control;
 
 	/* Get the platform data */
@@ -1902,9 +2106,15 @@
 		pl08x->mem_buses = pl08x->pd->mem_buses;
 	}
 
+	if (vd->pl080s)
+		pl08x->lli_words = PL080S_LLI_WORDS;
+	else
+		pl08x->lli_words = PL080_LLI_WORDS;
+	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
+
 	/* A DMA memory pool for LLIs, align on 1-byte boundary */
 	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
-			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+						tsfr_size, PL08X_ALIGN, 0);
 	if (!pl08x->pool) {
 		ret = -ENOMEM;
 		goto out_no_lli_pool;
@@ -1947,6 +2157,7 @@
 
 		ch->id = i;
 		ch->base = pl08x->base + PL080_Cx_BASE(i);
+		ch->reg_config = ch->base + vd->config_offset;
 		spin_lock_init(&ch->lock);
 
 		/*
@@ -1957,7 +2168,7 @@
 		if (vd->nomadik) {
 			u32 val;
 
-			val = readl(ch->base + PL080_CH_CONFIG);
+			val = readl(ch->reg_config);
 			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
 				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
 				ch->locked = true;
@@ -2008,8 +2219,8 @@
 
 	amba_set_drvdata(adev, pl08x);
 	init_pl08x_debugfs(pl08x);
-	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
-		 amba_part(adev), amba_rev(adev),
+	dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+		 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
 
 	return 0;
@@ -2038,22 +2249,41 @@
 
 /* PL080 has 8 channels and the PL081 has just 2 */
 static struct vendor_data vendor_pl080 = {
+	.config_offset = PL080_CH_CONFIG,
 	.channels = 8,
 	.dualmaster = true,
+	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 };
 
 static struct vendor_data vendor_nomadik = {
+	.config_offset = PL080_CH_CONFIG,
 	.channels = 8,
 	.dualmaster = true,
 	.nomadik = true,
+	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
+static struct vendor_data vendor_pl080s = {
+	.config_offset = PL080S_CH_CONFIG,
+	.channels = 8,
+	.pl080s = true,
+	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
 };
 
 static struct vendor_data vendor_pl081 = {
+	.config_offset = PL080_CH_CONFIG,
 	.channels = 2,
 	.dualmaster = false,
+	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 };
 
 static struct amba_id pl08x_ids[] = {
+	/* Samsung PL080S variant */
+	{
+		.id	= 0x0a141080,
+		.mask	= 0xffffffff,
+		.data	= &vendor_pl080s,
+	},
 	/* PL080 */
 	{
 		.id	= 0x00041080,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db..9162ac8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@
 EXPORT_SYMBOL(dma_issue_pending_all);
 
 /**
- * nth_chan - returns the nth channel of the given capability
- * @cap: capability to match
- * @n: nth channel desired
- *
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied.  Must be called
- * under dma_list_mutex.
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
  */
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+	int node = dev_to_node(chan->device->dev);
+	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	struct dma_chan *ret = NULL;
 	struct dma_chan *min = NULL;
+	struct dma_chan *localmin = NULL;
 
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
 				continue;
-			if (!min)
-				min = chan;
-			else if (chan->table_count < min->table_count)
+			if (!min || chan->table_count < min->table_count)
 				min = chan;
 
-			if (n-- == 0) {
-				ret = chan;
-				break; /* done */
-			}
+			if (dma_chan_is_local(chan, cpu))
+				if (!localmin ||
+				    chan->table_count < localmin->table_count)
+					localmin = chan;
 		}
-		if (ret)
-			break; /* done */
 	}
 
-	if (!ret)
-		ret = min;
+	chan = localmin ? localmin : min;
 
-	if (ret)
-		ret->table_count++;
+	if (chan)
+		chan->table_count++;
 
-	return ret;
+	return chan;
 }
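
The rework above keeps two running candidates, the globally least-used channel and the least-used channel local to the requesting CPU's NUMA node, and prefers the local one. A userspace model of the same selection (illustrative types):

    #include <stdbool.h>
    #include <stdio.h>

    struct chan { int table_count; int node; };

    static struct chan *pick(struct chan *c, int n, int cpu_node)
    {
            struct chan *min = NULL, *localmin = NULL;

            for (int i = 0; i < n; i++) {
                    /* node == -1 counts as local, like dev_to_node() == -1 */
                    bool local = c[i].node == -1 || c[i].node == cpu_node;

                    if (!min || c[i].table_count < min->table_count)
                            min = &c[i];
                    if (local && (!localmin ||
                                  c[i].table_count < localmin->table_count))
                            localmin = &c[i];
            }
            return localmin ? localmin : min;
    }

    int main(void)
    {
            struct chan chans[] = { { 0, 1 }, { 2, 0 }, { 5, 0 } };

            /* CPU on node 0 gets {2,0}, not the globally least-used {0,1} */
            printf("picked table_count=%d\n", pick(chans, 3, 0)->table_count);
            return 0;
    }
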
 
 /**
@@ -441,7 +446,6 @@
 	struct dma_device *device;
 	int cpu;
 	int cap;
-	int n;
 
 	/* undo the last distribution */
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@
 		return;
 
 	/* redistribute available channels */
-	n = 0;
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 		for_each_online_cpu(cpu) {
-			if (num_possible_cpus() > 1)
-				chan = nth_chan(cap, n++);
-			else
-				chan = nth_chan(cap, -1);
-
+			chan = min_chan(cap, cpu);
 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 		}
 }
@@ -510,7 +509,33 @@
 }
 
 /**
- * dma_request_channel - try to allocate an exclusive channel
+ * dma_get_slave_channel - try to get a specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+	int err = -EBUSY;
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	if (chan->client_count == 0) {
+		err = dma_chan_get(chan);
+		if (err)
+			pr_debug("%s: failed to get %s: (%d)\n",
+				__func__, dma_chan_name(chan), err);
+	} else {
+		chan = NULL;
+	}
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
  * @fn_param: opaque parameter to pass to dma_filter_fn
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e88ded2..92f796c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -25,44 +25,46 @@
 #include <linux/seq_file.h>
 
 static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
 
 static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
 static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+module_param_string(device, test_device, sizeof(test_device),
+		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
 
 static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(threads_per_chan,
 		"Number of threads to start per channel (default: 1)");
 
 static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channels,
 		"Maximum number of channels to use (default: all)");
 
 static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(iterations,
 		"Iterations before stopping test (default: infinite)");
 
 static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(xor_sources,
 		"Number of xor source buffers (default: 3)");
 
 static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(pq_sources,
 		"Number of p+q source buffers (default: 3)");
 
 static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO);
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
 		 "Pass -1 for infinite timeout");
 
@@ -193,7 +195,6 @@
 
 	/* debugfs related stuff */
 	struct dentry		*root;
-	struct dmatest_params	dbgfs_params;
 
 	/* Test results */
 	struct list_head	results;
@@ -406,7 +407,11 @@
 	list_add_tail(&tr->node, &r->results);
 	mutex_unlock(&info->results_lock);
 
-	pr_warn("%s\n", thread_result_get(r->name, tr));
+	if (tr->type == DMATEST_ET_OK)
+		pr_debug("%s\n", thread_result_get(r->name, tr));
+	else
+		pr_warn("%s\n", thread_result_get(r->name, tr));
+
 	return 0;
 }
 
@@ -1007,7 +1012,15 @@
 	result_free(info, NULL);
 
 	/* Copy test parameters */
-	memcpy(params, &info->dbgfs_params, sizeof(*params));
+	params->buf_size = test_buf_size;
+	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+	strlcpy(params->device, strim(test_device), sizeof(params->device));
+	params->threads_per_chan = threads_per_chan;
+	params->max_channels = max_channels;
+	params->iterations = iterations;
+	params->xor_sources = xor_sources;
+	params->pq_sources = pq_sources;
+	params->timeout = timeout;
 
 	/* Run test with new parameters */
 	return __run_threaded_test(info);
@@ -1029,71 +1042,6 @@
 	return false;
 }
 
-static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
-		const void __user *from, size_t count)
-{
-	char tmp[20];
-	ssize_t len;
-
-	len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
-	if (len >= 0) {
-		tmp[len] = '\0';
-		strlcpy(to, strim(tmp), available);
-	}
-
-	return len;
-}
-
-static ssize_t dtf_read_channel(struct file *file, char __user *buf,
-		size_t count, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return simple_read_from_buffer(buf, count, ppos,
-			info->dbgfs_params.channel,
-			strlen(info->dbgfs_params.channel));
-}
-
-static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
-		size_t size, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return dtf_write_string(info->dbgfs_params.channel,
-				sizeof(info->dbgfs_params.channel),
-				ppos, buf, size);
-}
-
-static const struct file_operations dtf_channel_fops = {
-	.read	= dtf_read_channel,
-	.write	= dtf_write_channel,
-	.open	= simple_open,
-	.llseek	= default_llseek,
-};
-
-static ssize_t dtf_read_device(struct file *file, char __user *buf,
-		size_t count, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return simple_read_from_buffer(buf, count, ppos,
-			info->dbgfs_params.device,
-			strlen(info->dbgfs_params.device));
-}
-
-static ssize_t dtf_write_device(struct file *file, const char __user *buf,
-		size_t size, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return dtf_write_string(info->dbgfs_params.device,
-				sizeof(info->dbgfs_params.device),
-				ppos, buf, size);
-}
-
-static const struct file_operations dtf_device_fops = {
-	.read	= dtf_read_device,
-	.write	= dtf_write_device,
-	.open	= simple_open,
-	.llseek	= default_llseek,
-};
-
 static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
 		size_t count, loff_t *ppos)
 {
@@ -1187,8 +1135,6 @@
 static int dmatest_register_dbgfs(struct dmatest_info *info)
 {
 	struct dentry *d;
-	struct dmatest_params *params = &info->dbgfs_params;
-	int ret = -ENOMEM;
 
 	d = debugfs_create_dir("dmatest", NULL);
 	if (IS_ERR(d))
@@ -1198,81 +1144,24 @@
 
 	info->root = d;
 
-	/* Copy initial values */
-	memcpy(params, &info->params, sizeof(*params));
-
-	/* Test parameters */
-
-	d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->buf_size);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
-				info, &dtf_channel_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
-				info, &dtf_device_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->threads_per_chan);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->max_channels);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->iterations);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->xor_sources);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->pq_sources);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->timeout);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
 	/* Run or stop threaded test */
-	d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
-				info, &dtf_run_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
+	debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
+			    &dtf_run_fops);
 
 	/* Results of test in progress */
-	d = debugfs_create_file("results", S_IRUGO, info->root, info,
-				&dtf_results_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
+	debugfs_create_file("results", S_IRUGO, info->root, info,
+			    &dtf_results_fops);
 
 	return 0;
 
-err_node:
-	debugfs_remove_recursive(info->root);
 err_root:
 	pr_err("dmatest: Failed to initialize debugfs\n");
-	return ret;
+	return -ENOMEM;
 }
 
 static int __init dmatest_init(void)
 {
 	struct dmatest_info *info = &test_info;
-	struct dmatest_params *params = &info->params;
 	int ret;
 
 	memset(info, 0, sizeof(*info));
@@ -1283,17 +1172,6 @@
 	mutex_init(&info->results_lock);
 	INIT_LIST_HEAD(&info->results);
 
-	/* Set default parameters */
-	params->buf_size = test_buf_size;
-	strlcpy(params->channel, test_channel, sizeof(params->channel));
-	strlcpy(params->device, test_device, sizeof(params->device));
-	params->threads_per_chan = threads_per_chan;
-	params->max_channels = max_channels;
-	params->iterations = iterations;
-	params->xor_sources = xor_sources;
-	params->pq_sources = pq_sources;
-	params->timeout = timeout;
-
 	ret = dmatest_register_dbgfs(info);
 	if (ret)
 		return ret;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index eea479c..89eb89f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -37,16 +37,22 @@
  * which does not support descriptor writeback.
  */
 
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+	return dwc->request_line == (typeof(dwc->request_line))~0;
+}
+
 static inline void dwc_set_masters(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	struct dw_dma_slave *dws = dwc->chan.private;
 	unsigned char mmax = dw->nr_masters - 1;
 
-	if (dwc->request_line == ~0) {
-		dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
-		dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
-	}
+	if (!is_request_line_unset(dwc))
+		return;
+
+	dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+	dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
 }
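
The is_request_line_unset() helper exists because comparing a field narrower than int against plain ~0 quietly fails after integer promotion; the cast performs the comparison at the field's own width. Demonstrated here with a u16 purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t request_line = (uint16_t)~0;   /* "unset" sentinel */

            printf("%d\n", request_line == ~0);           /* 0: 0xffff != -1 */
            printf("%d\n", request_line == (uint16_t)~0); /* 1: width-matched */
            return 0;
    }
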
 
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
@@ -644,10 +650,13 @@
 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 {
 	struct dw_dma *dw = dev_id;
-	u32 status;
+	u32 status = dma_readl(dw, STATUS_INT);
 
-	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
-			dma_readl(dw, STATUS_INT));
+	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
+
+	/* Check if we have any interrupt from the DMAC */
+	if (!status)
+		return IRQ_NONE;
 
 	/*
 	 * Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@
 	dwc->direction = sconfig->direction;
 
 	/* Take the request line from slave_id member */
-	if (dwc->request_line == ~0)
+	if (is_request_line_unset(dwc))
 		dwc->request_line = sconfig->slave_id;
 
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@
 	enum dma_status		ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS) {
-		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+	if (ret == DMA_SUCCESS)
+		return ret;
 
-		ret = dma_cookie_status(chan, cookie, txstate);
-	}
+	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS)
 		dma_set_residue(txstate, dwc_get_residue(dwc));
 
-	if (dwc->paused)
+	if (dwc->paused && ret == DMA_IN_PROGRESS)
 		return DMA_PAUSED;
 
 	return ret;
@@ -1560,8 +1569,8 @@
 	/* Disable BLOCK interrupts as well */
 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
-			       "dw_dmac", dw);
+	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+			       IRQF_SHARED, "dw_dmac", dw);
 	if (err)
 		return err;
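
Two hunks above cooperate: the handler is now registered with IRQF_SHARED, so it must read STATUS_INT first and return IRQ_NONE when the DMAC raised nothing, letting other devices on the shared line be polled. A minimal model of that contract (userspace stand-ins, not the kernel API):

    #include <stdint.h>

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    static uint32_t read_status(void) { return 0; }  /* pretend MMIO read */

    static enum irqreturn handler(void)
    {
            uint32_t status = read_status();

            if (!status)            /* not ours: let other sharers be asked */
                    return IRQ_NONE;

            /* ... mask interrupts and defer the real work, as above ... */
            return IRQ_HANDLED;
    }

    int main(void)
    {
            return handler() == IRQ_NONE ? 0 : 1;
    }
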
 
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6c9449c..e35d975 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -253,6 +253,7 @@
 	{ "INTL9C60", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
 #endif
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5f3e532..ff50ff4 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@
 	struct list_head		node;
 	int				absync;
 	int				pset_nr;
+	int				processed;
 	struct edmacc_param		pset[0];
 };
 
@@ -69,6 +70,7 @@
 	int				ch_num;
 	bool				alloced;
 	int				slot[EDMA_MAX_SLOTS];
+	int				missed;
 	struct dma_slave_config		cfg;
 };
 
@@ -104,22 +106,34 @@
 /* Dispatch a queued descriptor to the controller (caller holds lock) */
 static void edma_execute(struct edma_chan *echan)
 {
-	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+	struct virt_dma_desc *vdesc;
 	struct edma_desc *edesc;
-	int i;
+	struct device *dev = echan->vchan.chan.device->dev;
+	int i, j, left, nslots;
 
-	if (!vdesc) {
-		echan->edesc = NULL;
-		return;
+	/* If either we processed all psets or we're still not started */
+	if (!echan->edesc ||
+	    echan->edesc->pset_nr == echan->edesc->processed) {
+		/* Get next vdesc */
+		vdesc = vchan_next_desc(&echan->vchan);
+		if (!vdesc) {
+			echan->edesc = NULL;
+			return;
+		}
+		list_del(&vdesc->node);
+		echan->edesc = to_edma_desc(&vdesc->tx);
 	}
 
-	list_del(&vdesc->node);
+	edesc = echan->edesc;
 
-	echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+	/* Find out how many left */
+	left = edesc->pset_nr - edesc->processed;
+	nslots = min(MAX_NR_SG, left);
 
 	/* Write descriptor PaRAM set(s) */
-	for (i = 0; i < edesc->pset_nr; i++) {
-		edma_write_slot(echan->slot[i], &edesc->pset[i]);
+	for (i = 0; i < nslots; i++) {
+		j = i + edesc->processed;
+		edma_write_slot(echan->slot[i], &edesc->pset[j]);
 		dev_dbg(echan->vchan.chan.device->dev,
 			"\n pset[%d]:\n"
 			"  chnum\t%d\n"
@@ -132,24 +146,50 @@
 			"  bidx\t%08x\n"
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
-			i, echan->ch_num, echan->slot[i],
-			edesc->pset[i].opt,
-			edesc->pset[i].src,
-			edesc->pset[i].dst,
-			edesc->pset[i].a_b_cnt,
-			edesc->pset[i].ccnt,
-			edesc->pset[i].src_dst_bidx,
-			edesc->pset[i].src_dst_cidx,
-			edesc->pset[i].link_bcntrld);
+			j, echan->ch_num, echan->slot[i],
+			edesc->pset[j].opt,
+			edesc->pset[j].src,
+			edesc->pset[j].dst,
+			edesc->pset[j].a_b_cnt,
+			edesc->pset[j].ccnt,
+			edesc->pset[j].src_dst_bidx,
+			edesc->pset[j].src_dst_cidx,
+			edesc->pset[j].link_bcntrld);
 		/* Link to the previous slot if not the last set */
-		if (i != (edesc->pset_nr - 1))
+		if (i != (nslots - 1))
 			edma_link(echan->slot[i], echan->slot[i+1]);
-		/* Final pset links to the dummy pset */
-		else
-			edma_link(echan->slot[i], echan->ecc->dummy_slot);
 	}
 
-	edma_start(echan->ch_num);
+	edesc->processed += nslots;
+
+	/*
+	 * If this is the last set in a set of SG-list transactions,
+	 * set up a link to the dummy slot; this results in all future
+	 * events being absorbed, and that's OK because we're done.
+	 */
+	if (edesc->processed == edesc->pset_nr)
+		edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+
+	edma_resume(echan->ch_num);
+
+	if (edesc->processed <= MAX_NR_SG) {
+		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+		edma_start(echan->ch_num);
+	}
+
+	/*
+	 * This happens due to setup times between intermediate transfers
+	 * in long SG lists which have to be broken up into transfers of
+	 * MAX_NR_SG
+	 */
+	if (echan->missed) {
+		dev_dbg(dev, "missed event in execute detected\n");
+		edma_clean_channel(echan->ch_num);
+		edma_stop(echan->ch_num);
+		edma_start(echan->ch_num);
+		edma_trigger_channel(echan->ch_num);
+		echan->missed = 0;
+	}
 }
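
edma_execute() now feeds the controller at most MAX_NR_SG PaRAM sets at a time and uses edesc->processed to resume where it left off; the mid-chunk TCINTEN interrupt triggers the reload. A model of the chunking arithmetic:

    #include <stdio.h>

    #define MAX_NR_SG 20

    int main(void)
    {
            int pset_nr = 45, processed = 0;

            while (processed < pset_nr) {
                    int left = pset_nr - processed;
                    int nslots = left < MAX_NR_SG ? left : MAX_NR_SG;

                    printf("program psets %d..%d\n", processed,
                           processed + nslots - 1);
                    processed += nslots; /* completion IRQ reloads the rest */
            }
            return 0;
    }
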
 
 static int edma_terminate_all(struct edma_chan *echan)
@@ -222,9 +262,9 @@
 	enum dma_slave_buswidth dev_width;
 	u32 burst;
 	struct scatterlist *sg;
-	int i;
 	int acnt, bcnt, ccnt, src, dst, cidx;
 	int src_bidx, dst_bidx, src_cidx, dst_cidx;
+	int i, nslots;
 
 	if (unlikely(!echan || !sgl || !sg_len))
 		return NULL;
@@ -247,12 +287,6 @@
 		return NULL;
 	}
 
-	if (sg_len > MAX_NR_SG) {
-		dev_err(dev, "Exceeded max SG segments %d > %d\n",
-			sg_len, MAX_NR_SG);
-		return NULL;
-	}
-
 	edesc = kzalloc(sizeof(*edesc) + sg_len *
 		sizeof(edesc->pset[0]), GFP_ATOMIC);
 	if (!edesc) {
@@ -262,8 +296,10 @@
 
 	edesc->pset_nr = sg_len;
 
-	for_each_sg(sgl, sg, sg_len, i) {
-		/* Allocate a PaRAM slot, if needed */
+	/* Allocate a PaRAM slot, if needed */
+	nslots = min_t(unsigned, MAX_NR_SG, sg_len);
+
+	for (i = 0; i < nslots; i++) {
 		if (echan->slot[i] < 0) {
 			echan->slot[i] =
 				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
@@ -273,6 +309,10 @@
 				return NULL;
 			}
 		}
+	}
+
+	/* Configure PaRAM sets for each SG */
+	for_each_sg(sgl, sg, sg_len, i) {
 
 		acnt = dev_width;
 
@@ -330,6 +370,12 @@
 		/* Configure A or AB synchronized transfers */
 		if (edesc->absync)
 			edesc->pset[i].opt |= SYNCDIM;
+
+		/* If this is the last in a current SG set of transactions,
+		   enable interrupts so that next set is processed */
+		if (!((i+1) % MAX_NR_SG))
+			edesc->pset[i].opt |= TCINTEN;
+
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
 			edesc->pset[i].opt |= TCINTEN;
@@ -355,27 +401,65 @@
 	struct device *dev = echan->vchan.chan.device->dev;
 	struct edma_desc *edesc;
 	unsigned long flags;
+	struct edmacc_param p;
 
-	/* Stop the channel */
-	edma_stop(echan->ch_num);
+	/* Pause the channel */
+	edma_pause(echan->ch_num);
 
 	switch (ch_status) {
 	case DMA_COMPLETE:
-		dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
-
 		spin_lock_irqsave(&echan->vchan.lock, flags);
 
 		edesc = echan->edesc;
 		if (edesc) {
+			if (edesc->processed == edesc->pset_nr) {
+				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+				edma_stop(echan->ch_num);
+				vchan_cookie_complete(&edesc->vdesc);
+			} else {
+				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+			}
+
 			edma_execute(echan);
-			vchan_cookie_complete(&edesc->vdesc);
 		}
 
 		spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
 		break;
 	case DMA_CC_ERROR:
-		dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+		spin_lock_irqsave(&echan->vchan.lock, flags);
+
+		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
+
+		/*
+		 * Issue the transfer later based on the missed flag; this
+		 * is certain to happen because either:
+		 * (1) we finished transmitting an intermediate slot and
+		 *     edma_execute is coming up.
+		 * (2) or we finished current transfer and issue will
+		 *     call edma_execute.
+		 *
+		 * Important note: issuing can be dangerous here and
+		 * lead to some nasty recursion when we are in a NULL
+		 * slot. So we avoid doing so and set the missed flag.
+		 */
+		if (p.a_b_cnt == 0 && p.ccnt == 0) {
+			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
+			echan->missed = 1;
+		} else {
+			/*
+			 * The slot is already programmed but the event got
+			 * missed, so it's safe to issue it here.
+			 */
+			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
+			edma_clean_channel(echan->ch_num);
+			edma_stop(echan->ch_num);
+			edma_start(echan->ch_num);
+			edma_trigger_channel(echan->ch_num);
+		}
+
+		spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
 		break;
 	default:
 		break;
@@ -502,8 +586,6 @@
 	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
 		struct edma_desc *edesc = echan->edesc;
 		txstate->residue = edma_desc_size(edesc);
-	} else {
-		txstate->residue = 0;
 	}
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f2bf8c0..591cd8c 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1313,15 +1313,7 @@
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *state)
 {
-	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-	enum dma_status ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&edmac->lock, flags);
-	ret = dma_cookie_status(chan, cookie, state);
-	spin_unlock_irqrestore(&edmac->lock, flags);
-
-	return ret;
+	return dma_cookie_status(chan, cookie, state);
 }
 
 /**
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 49e8fbd..b3f3e90 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -979,15 +979,7 @@
 					dma_cookie_t cookie,
 					struct dma_tx_state *txstate)
 {
-	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	enum dma_status ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-	ret = dma_cookie_status(dchan, cookie, txstate);
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-	return ret;
+	return dma_cookie_status(dchan, cookie, txstate);
 }
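
Both the ep93xx and fsldma hunks can drop their channel locks because dma_cookie_status() merely snapshots two cookie values. Its rough shape, paraphrased from drivers/dma/dmaengine.h of this era (treat as approximate, not verbatim):

    static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
            dma_cookie_t cookie, struct dma_tx_state *state)
    {
            dma_cookie_t used, complete;

            used = chan->cookie;
            complete = chan->completed_cookie;
            barrier();
            if (state) {
                    state->last = complete;
                    state->used = used;
                    state->residue = 0;
            }
            return dma_async_is_complete(cookie, complete, used);
    }
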
 
 /*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ff2aab9..78f8ca5 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -805,10 +805,8 @@
 	}
 	INIT_LIST_HEAD(&imxdmac->ld_free);
 
-	if (imxdmac->sg_list) {
-		kfree(imxdmac->sg_list);
-		imxdmac->sg_list = NULL;
-	}
+	kfree(imxdmac->sg_list);
+	imxdmac->sg_list = NULL;
 }
 
 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1e44b8c..fc43603 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -243,7 +243,6 @@
  * @event_id1		for channels that use 2 events
  * @word_size		peripheral access size
  * @buf_tail		ID of the buffer that was processed
- * @done		channel completion
  * @num_bd		max NUM_BD. number of descriptors currently handling
  */
 struct sdma_channel {
@@ -255,7 +254,6 @@
 	unsigned int			event_id1;
 	enum dma_slave_buswidth		word_size;
 	unsigned int			buf_tail;
-	struct completion		done;
 	unsigned int			num_bd;
 	struct sdma_buffer_descriptor	*bd;
 	dma_addr_t			bd_phys;
@@ -307,9 +305,10 @@
 	u32	ram_code_size;
 };
 
-enum sdma_devtype {
-	IMX31_SDMA,	/* runs on i.mx31 */
-	IMX35_SDMA,	/* runs on i.mx35 and later */
+struct sdma_driver_data {
+	int chnenbl0;
+	int num_events;
+	struct sdma_script_start_addrs	*script_addrs;
 };
 
 struct sdma_engine {
@@ -318,8 +317,6 @@
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
-	enum sdma_devtype		devtype;
-	unsigned int			num_events;
 	struct sdma_context_data	*context;
 	dma_addr_t			context_phys;
 	struct dma_device		dma_device;
@@ -327,15 +324,118 @@
 	struct clk			*clk_ahb;
 	spinlock_t			channel_0_lock;
 	struct sdma_script_start_addrs	*script_addrs;
+	const struct sdma_driver_data	*drvdata;
+};
+
+static struct sdma_driver_data sdma_imx31 = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX31,
+	.num_events = 32,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx25 = {
+	.ap_2_ap_addr = 729,
+	.uart_2_mcu_addr = 904,
+	.per_2_app_addr = 1255,
+	.mcu_2_app_addr = 834,
+	.uartsh_2_mcu_addr = 1120,
+	.per_2_shp_addr = 1329,
+	.mcu_2_shp_addr = 1048,
+	.ata_2_mcu_addr = 1560,
+	.mcu_2_ata_addr = 1479,
+	.app_2_per_addr = 1189,
+	.app_2_mcu_addr = 770,
+	.shp_2_per_addr = 1407,
+	.shp_2_mcu_addr = 979,
+};
+
+static struct sdma_driver_data sdma_imx25 = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx25,
+};
+
+static struct sdma_driver_data sdma_imx35 = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx51 = {
+	.ap_2_ap_addr = 642,
+	.uart_2_mcu_addr = 817,
+	.mcu_2_app_addr = 747,
+	.mcu_2_shp_addr = 961,
+	.ata_2_mcu_addr = 1473,
+	.mcu_2_ata_addr = 1392,
+	.app_2_per_addr = 1033,
+	.app_2_mcu_addr = 683,
+	.shp_2_per_addr = 1251,
+	.shp_2_mcu_addr = 892,
+};
+
+static struct sdma_driver_data sdma_imx51 = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx51,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx53 = {
+	.ap_2_ap_addr = 642,
+	.app_2_mcu_addr = 683,
+	.mcu_2_app_addr = 747,
+	.uart_2_mcu_addr = 817,
+	.shp_2_mcu_addr = 891,
+	.mcu_2_shp_addr = 960,
+	.uartsh_2_mcu_addr = 1032,
+	.spdif_2_mcu_addr = 1100,
+	.mcu_2_spdif_addr = 1134,
+	.firi_2_mcu_addr = 1193,
+	.mcu_2_firi_addr = 1290,
+};
+
+static struct sdma_driver_data sdma_imx53 = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx53,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx6q = {
+	.ap_2_ap_addr = 642,
+	.uart_2_mcu_addr = 817,
+	.mcu_2_app_addr = 747,
+	.per_2_per_addr = 6331,
+	.uartsh_2_mcu_addr = 1032,
+	.mcu_2_shp_addr = 960,
+	.app_2_mcu_addr = 683,
+	.shp_2_mcu_addr = 891,
+	.spdif_2_mcu_addr = 1100,
+	.mcu_2_spdif_addr = 1134,
+};
+
+static struct sdma_driver_data sdma_imx6q = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx6q,
 };
 
 static struct platform_device_id sdma_devtypes[] = {
 	{
+		.name = "imx25-sdma",
+		.driver_data = (unsigned long)&sdma_imx25,
+	}, {
 		.name = "imx31-sdma",
-		.driver_data = IMX31_SDMA,
+		.driver_data = (unsigned long)&sdma_imx31,
 	}, {
 		.name = "imx35-sdma",
-		.driver_data = IMX35_SDMA,
+		.driver_data = (unsigned long)&sdma_imx35,
+	}, {
+		.name = "imx51-sdma",
+		.driver_data = (unsigned long)&sdma_imx51,
+	}, {
+		.name = "imx53-sdma",
+		.driver_data = (unsigned long)&sdma_imx53,
+	}, {
+		.name = "imx6q-sdma",
+		.driver_data = (unsigned long)&sdma_imx6q,
 	}, {
 		/* sentinel */
 	}
@@ -343,8 +443,11 @@
 MODULE_DEVICE_TABLE(platform, sdma_devtypes);
 
 static const struct of_device_id sdma_dt_ids[] = {
-	{ .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
-	{ .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
+	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
+	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
+	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
+	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
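
With the devtype enum gone, each compatible string and platform id carries a pointer to its sdma_driver_data, and probe simply dereferences whichever table entry matched. A standalone model of that lookup (the register offsets here are illustrative only, not the real SDMA_CHNENBL0_* values):

    #include <stdio.h>
    #include <string.h>

    struct drvdata { int chnenbl0; int num_events; };

    static const struct drvdata imx31 = { 0x080, 32 };
    static const struct drvdata imx35 = { 0x200, 48 };

    struct of_id { const char *compatible; const void *data; };

    static const struct of_id ids[] = {
            { "fsl,imx35-sdma", &imx35 },
            { "fsl,imx31-sdma", &imx31 },
            { NULL, NULL }
    };

    int main(void)
    {
            const char *compat = "fsl,imx35-sdma";  /* from the device tree */

            for (const struct of_id *id = ids; id->compatible; id++)
                    if (!strcmp(id->compatible, compat)) {
                            const struct drvdata *dd = id->data;

                            printf("num_events=%d\n", dd->num_events);
                    }
            return 0;
    }
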
@@ -356,8 +459,7 @@
 
 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 {
-	u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
-						      SDMA_CHNENBL0_IMX35);
+	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 	return chnenbl0 + event * 4;
 }
 
@@ -547,8 +649,6 @@
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-	complete(&sdmac->done);
-
 	if (sdmac->flags & IMX_DMA_SG_LOOP)
 		sdma_handle_channel_loop(sdmac);
 	else
@@ -733,7 +833,7 @@
 	sdmac->per_addr = 0;
 
 	if (sdmac->event_id0) {
-		if (sdmac->event_id0 >= sdmac->sdma->num_events)
+		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
 			return -EINVAL;
 		sdma_event_enable(sdmac, sdmac->event_id0);
 	}
@@ -812,9 +912,6 @@
 	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 
 	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-
-	init_completion(&sdmac->done);
-
 	return 0;
 out:
 
@@ -1120,15 +1217,12 @@
 }
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
-					    dma_cookie_t cookie,
-					    struct dma_tx_state *txstate)
+				      dma_cookie_t cookie,
+				      struct dma_tx_state *txstate)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	dma_cookie_t last_used;
 
-	last_used = chan->cookie;
-
-	dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			sdmac->chn_count - sdmac->chn_real_count);
 
 	return sdmac->status;
@@ -1218,19 +1312,6 @@
 	int i, ret;
 	dma_addr_t ccb_phys;
 
-	switch (sdma->devtype) {
-	case IMX31_SDMA:
-		sdma->num_events = 32;
-		break;
-	case IMX35_SDMA:
-		sdma->num_events = 48;
-		break;
-	default:
-		dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
-			sdma->devtype);
-		return -ENODEV;
-	}
-
 	clk_enable(sdma->clk_ipg);
 	clk_enable(sdma->clk_ahb);
 
@@ -1257,7 +1338,7 @@
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
 
 	/* disable all channels */
-	for (i = 0; i < sdma->num_events; i++)
+	for (i = 0; i < sdma->drvdata->num_events; i++)
 		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
 
 	/* All channels have priority 0 */
@@ -1335,10 +1416,21 @@
 	int ret;
 	int irq;
 	struct resource *iores;
-	struct sdma_platform_data *pdata = pdev->dev.platform_data;
+	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	int i;
 	struct sdma_engine *sdma;
 	s32 *saddr_arr;
+	const struct sdma_driver_data *drvdata = NULL;
+
+	if (of_id)
+		drvdata = of_id->data;
+	else if (pdev->id_entry)
+		drvdata = (void *)pdev->id_entry->driver_data;
+
+	if (!drvdata) {
+		dev_err(&pdev->dev, "unable to find driver data\n");
+		return -EINVAL;
+	}
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
@@ -1347,6 +1439,7 @@
 	spin_lock_init(&sdma->channel_0_lock);
 
 	sdma->dev = &pdev->dev;
+	sdma->drvdata = drvdata;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
@@ -1396,10 +1489,6 @@
 	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
 		saddr_arr[i] = -EINVAL;
 
-	if (of_id)
-		pdev->id_entry = of_id->data;
-	sdma->devtype = pdev->id_entry->driver_data;
-
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
 
@@ -1431,6 +1520,8 @@
 	if (ret)
 		goto err_init;
 
+	if (sdma->drvdata->script_addrs)
+		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
 	if (pdata && pdata->script_addrs)
 		sdma_add_scripts(sdma, pdata->script_addrs);
 
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e03..d8ececaf 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -251,7 +251,7 @@
 }
 
 static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
-			dma_addr_t addr, u32 offset, u8 coef, int idx)
+			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
 {
 	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
 	struct ioat_pq16a_descriptor *pq16 =
@@ -1775,15 +1775,12 @@
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_xeon_cb32(pdev))
-		dma->copy_align = 6;
-
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 
-	if (is_bwd_noraid(pdev))
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
 		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@
 	if (device->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
-		dma->xor_align = 6;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
 		dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@
 
 		if (device->cap & IOAT_CAP_RAID16SS) {
 			dma_set_maxpq(dma, 16, 0);
-			dma->pq_align = 0;
 		} else {
 			dma_set_maxpq(dma, 8, 0);
-			if (is_xeon_cb32(pdev))
-				dma->pq_align = 6;
-			else
-				dma->pq_align = 0;
 		}
 
 		if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@
 
 			if (device->cap & IOAT_CAP_RAID16SS) {
 				dma->max_xor = 16;
-				dma->xor_align = 0;
 			} else {
 				dma->max_xor = 8;
-				if (is_xeon_cb32(pdev))
-					dma->xor_align = 6;
-				else
-					dma->xor_align = 0;
 			}
 		}
 	}
@@ -1844,14 +1830,6 @@
 	device->cleanup_fn = ioat3_cleanup_event;
 	device->timer_fn = ioat3_timer_event;
 
-	if (is_xeon_cb32(pdev)) {
-		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = NULL;
-
-		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
-		dma->device_prep_dma_pq_val = NULL;
-	}
-
 	/* starting with CB3.3 super extended descriptors are supported */
 	if (device->cap & IOAT_CAP_RAID16SS) {
 		char pool_name[14];
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index cc727ec..dd8b44a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -518,7 +518,7 @@
 	struct iop_adma_desc_slot *slot = NULL;
 	int init = iop_chan->slots_allocated ? 0 : 1;
 	struct iop_adma_platform_data *plat_data =
-		iop_chan->device->pdev->dev.platform_data;
+		dev_get_platdata(&iop_chan->device->pdev->dev);
 	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
 
 	/* Allocate descriptor slots */
@@ -1351,7 +1351,7 @@
 	struct iop_adma_device *device = platform_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
 	struct iop_adma_chan *iop_chan;
-	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
 
 	dma_async_device_unregister(&device->common);
 
@@ -1376,7 +1376,7 @@
 	struct iop_adma_device *adev;
 	struct iop_adma_chan *iop_chan;
 	struct dma_device *dma_dev;
-	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index d39c2cd..cb9c0bc 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1593,10 +1593,7 @@
 static enum dma_status idmac_tx_status(struct dma_chan *chan,
 		       dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
-	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
-	if (cookie != chan->cookie)
-		return DMA_ERROR;
-	return DMA_SUCCESS;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static int __init ipu_idmac_init(struct ipu *ipu)
@@ -1767,7 +1764,6 @@
 	iounmap(ipu->reg_ic);
 	iounmap(ipu->reg_ipu);
 	tasklet_kill(&ipu->tasklet);
-	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
new file mode 100644
index 0000000..a2c330f
--- /dev/null
+++ b/drivers/dma/k3dma.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME		"k3-dma"
+#define DMA_ALIGN		3
+#define DMA_MAX_SIZE		0x1ffc
+
+#define INT_STAT		0x00
+#define INT_TC1			0x04
+#define INT_ERR1		0x0c
+#define INT_ERR2		0x10
+#define INT_TC1_MASK		0x18
+#define INT_ERR1_MASK		0x20
+#define INT_ERR2_MASK		0x24
+#define INT_TC1_RAW		0x600
+#define INT_ERR1_RAW		0x608
+#define INT_ERR2_RAW		0x610
+#define CH_PRI			0x688
+#define CH_STAT			0x690
+#define CX_CUR_CNT		0x704
+#define CX_LLI			0x800
+#define CX_CNT			0x810
+#define CX_SRC			0x814
+#define CX_DST			0x818
+#define CX_CFG			0x81c
+#define AXI_CFG			0x820
+#define AXI_CFG_DEFAULT		0x201201
+
+#define CX_LLI_CHAIN_EN		0x2
+#define CX_CFG_EN		0x1
+#define CX_CFG_MEM2PER		(0x1 << 2)
+#define CX_CFG_PER2MEM		(0x2 << 2)
+#define CX_CFG_SRCINCR		(0x1 << 31)
+#define CX_CFG_DSTINCR		(0x1 << 30)
+
+struct k3_desc_hw {
+	u32 lli;
+	u32 reserved[3];
+	u32 count;
+	u32 saddr;
+	u32 daddr;
+	u32 config;
+} __aligned(32);
+
+struct k3_dma_desc_sw {
+	struct virt_dma_desc	vd;
+	dma_addr_t		desc_hw_lli;
+	size_t			desc_num;
+	size_t			size;
+	struct k3_desc_hw	desc_hw[0];
+};
+
+struct k3_dma_phy;
+
+struct k3_dma_chan {
+	u32			ccfg;
+	struct virt_dma_chan	vc;
+	struct k3_dma_phy	*phy;
+	struct list_head	node;
+	enum dma_transfer_direction dir;
+	dma_addr_t		dev_addr;
+	enum dma_status		status;
+};
+
+struct k3_dma_phy {
+	u32			idx;
+	void __iomem		*base;
+	struct k3_dma_chan	*vchan;
+	struct k3_dma_desc_sw	*ds_run;
+	struct k3_dma_desc_sw	*ds_done;
+};
+
+struct k3_dma_dev {
+	struct dma_device	slave;
+	void __iomem		*base;
+	struct tasklet_struct	task;
+	spinlock_t		lock;
+	struct list_head	chan_pending;
+	struct k3_dma_phy	*phy;
+	struct k3_dma_chan	*chans;
+	struct clk		*clk;
+	u32			dma_channels;
+	u32			dma_requests;
+};
+
+#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+
+static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct k3_dma_chan, vc.chan);
+}
+
+static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
+{
+	u32 val = 0;
+
+	if (on) {
+		val = readl_relaxed(phy->base + CX_CFG);
+		val |= CX_CFG_EN;
+		writel_relaxed(val, phy->base + CX_CFG);
+	} else {
+		val = readl_relaxed(phy->base + CX_CFG);
+		val &= ~CX_CFG_EN;
+		writel_relaxed(val, phy->base + CX_CFG);
+	}
+}
+
+static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
+{
+	u32 val = 0;
+
+	k3_dma_pause_dma(phy, false);
+
+	val = 0x1 << phy->idx;
+	writel_relaxed(val, d->base + INT_TC1_RAW);
+	writel_relaxed(val, d->base + INT_ERR1_RAW);
+	writel_relaxed(val, d->base + INT_ERR2_RAW);
+}
+
+static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
+{
+	writel_relaxed(hw->lli, phy->base + CX_LLI);
+	writel_relaxed(hw->count, phy->base + CX_CNT);
+	writel_relaxed(hw->saddr, phy->base + CX_SRC);
+	writel_relaxed(hw->daddr, phy->base + CX_DST);
+	writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
+	writel_relaxed(hw->config, phy->base + CX_CFG);
+}
+
+static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
+{
+	u32 cnt = 0;
+
+	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
+	cnt &= 0xffff;
+	return cnt;
+}
+
+static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
+{
+	return readl_relaxed(phy->base + CX_LLI);
+}
+
+static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
+{
+	return readl_relaxed(d->base + CH_STAT);
+}
+
+static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
+{
+	if (on) {
+		/* set same priority */
+		writel_relaxed(0x0, d->base + CH_PRI);
+
+		/* unmask irq */
+		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
+		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
+	} else {
+		/* mask irq */
+		writel_relaxed(0x0, d->base + INT_TC1_MASK);
+		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
+		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
+	}
+}
+
+static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+{
+	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
+	struct k3_dma_phy *p;
+	struct k3_dma_chan *c;
+	u32 stat = readl_relaxed(d->base + INT_STAT);
+	u32 tc1  = readl_relaxed(d->base + INT_TC1);
+	u32 err1 = readl_relaxed(d->base + INT_ERR1);
+	u32 err2 = readl_relaxed(d->base + INT_ERR2);
+	u32 i, irq_chan = 0;
+
+	while (stat) {
+		i = __ffs(stat);
+		stat &= (stat - 1);
+		if (likely(tc1 & BIT(i))) {
+			p = &d->phy[i];
+			c = p->vchan;
+			if (c) {
+				unsigned long flags;
+
+				spin_lock_irqsave(&c->vc.lock, flags);
+				vchan_cookie_complete(&p->ds_run->vd);
+				p->ds_done = p->ds_run;
+				spin_unlock_irqrestore(&c->vc.lock, flags);
+			}
+			irq_chan |= BIT(i);
+		}
+		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+			dev_warn(d->slave.dev, "DMA ERR\n");
+	}
+
+	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+	writel_relaxed(err1, d->base + INT_ERR1_RAW);
+	writel_relaxed(err2, d->base + INT_ERR2_RAW);
+
+	if (irq_chan) {
+		tasklet_schedule(&d->task);
+		return IRQ_HANDLED;
+	} else
+		return IRQ_NONE;
+}
+
+static int k3_dma_start_txd(struct k3_dma_chan *c)
+{
+	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	if (!c->phy)
+		return -EAGAIN;
+
+	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+		return -EAGAIN;
+
+	if (vd) {
+		struct k3_dma_desc_sw *ds =
+			container_of(vd, struct k3_dma_desc_sw, vd);
+		/*
+		 * fetch and remove request from vc->desc_issued
+		 * so vc->desc_issued only contains desc pending
+		 */
+		list_del(&ds->vd.node);
+		c->phy->ds_run = ds;
+		c->phy->ds_done = NULL;
+		/* start dma */
+		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
+		return 0;
+	}
+	c->phy->ds_done = NULL;
+	c->phy->ds_run = NULL;
+	return -EAGAIN;
+}
+
+static void k3_dma_tasklet(unsigned long arg)
+{
+	struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+	struct k3_dma_phy *p;
+	struct k3_dma_chan *c, *cn;
+	unsigned pch, pch_alloc = 0;
+
+	/* check new dma request of running channel in vc->desc_issued */
+	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+		spin_lock_irq(&c->vc.lock);
+		p = c->phy;
+		if (p && p->ds_done) {
+			if (k3_dma_start_txd(c)) {
+				/* No current txd associated with this channel */
+				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+				/* Mark this channel free */
+				c->phy = NULL;
+				p->vchan = NULL;
+			}
+		}
+		spin_unlock_irq(&c->vc.lock);
+	}
+
+	/* check new channel request in d->chan_pending */
+	spin_lock_irq(&d->lock);
+	for (pch = 0; pch < d->dma_channels; pch++) {
+		p = &d->phy[pch];
+
+		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+			c = list_first_entry(&d->chan_pending,
+				struct k3_dma_chan, node);
+			/* remove from d->chan_pending */
+			list_del_init(&c->node);
+			pch_alloc |= 1 << pch;
+			/* Mark this channel allocated */
+			p->vchan = c;
+			c->phy = p;
+			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
+		}
+	}
+	spin_unlock_irq(&d->lock);
+
+	for (pch = 0; pch < d->dma_channels; pch++) {
+		if (pch_alloc & (1 << pch)) {
+			p = &d->phy[pch];
+			c = p->vchan;
+			if (c) {
+				spin_lock_irq(&c->vc.lock);
+				k3_dma_start_txd(c);
+				spin_unlock_irq(&c->vc.lock);
+			}
+		}
+	}
+}
+
+static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static void k3_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&d->lock, flags);
+	list_del_init(&c->node);
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	vchan_free_chan_resources(&c->vc);
+	c->ccfg = 0;
+}
+
+static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(&c->vc.chan, cookie, state);
+	if (ret == DMA_SUCCESS)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	p = c->phy;
+	ret = c->status;
+
+	/*
+	 * If the cookie is on our issue queue, then the residue is
+	 * its total size.
+	 */
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
+	} else if ((!p) || (!p->ds_run)) {
+		bytes = 0;
+	} else {
+		struct k3_dma_desc_sw *ds = p->ds_run;
+		u32 clli = 0, index = 0;
+
+		bytes = k3_dma_get_curr_cnt(d, p);
+		clli = k3_dma_get_curr_lli(p);
+		index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+		for (; index < ds->desc_num; index++) {
+			bytes += ds->desc_hw[index].count;
+			/* end of lli */
+			if (!ds->desc_hw[index].lli)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	dma_set_residue(state, bytes);
+	return ret;
+}
+
+static void k3_dma_issue_pending(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	/* add request to vc->desc_issued */
+	if (vchan_issue_pending(&c->vc)) {
+		spin_lock(&d->lock);
+		if (!c->phy) {
+			if (list_empty(&c->node)) {
+				/* if new channel, add chan_pending */
+				list_add_tail(&c->node, &d->chan_pending);
+				/* check in tasklet */
+				tasklet_schedule(&d->task);
+				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+			}
+		}
+		spin_unlock(&d->lock);
+	} else
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
+			dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
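+	/*
+	 * Chain this hw descriptor to the next one; callers clear the lli
+	 * field of the final descriptor to terminate the list.
+	 */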
+	if ((num + 1) < ds->desc_num)
+		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+			sizeof(struct k3_desc_hw);
+	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
+	ds->desc_hw[num].count = len;
+	ds->desc_hw[num].saddr = src;
+	ds->desc_hw[num].daddr = dst;
+	ds->desc_hw[num].config = ccfg;
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
+	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_desc_sw *ds;
+	size_t copy = 0;
+	int num = 0;
+
+	if (!len)
+		return NULL;
+
+	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+	ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+	if (!ds) {
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+		return NULL;
+	}
+	ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+	ds->size = len;
+	ds->desc_num = num;
+	num = 0;
+
+	if (!c->ccfg) {
+		/* default is memtomem, without calling device_control */
+		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
+		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
+		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
+	}
+
+	do {
+		copy = min_t(size_t, len, DMA_MAX_SIZE);
+		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+		if (c->dir == DMA_MEM_TO_DEV) {
+			src += copy;
+		} else if (c->dir == DMA_DEV_TO_MEM) {
+			dst += copy;
+		} else {
+			src += copy;
+			dst += copy;
+		}
+		len -= copy;
+	} while (len);
+
+	ds->desc_hw[num-1].lli = 0;	/* end of link */
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+	enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_desc_sw *ds;
+	size_t len, avail, total = 0;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	int num = sglen, i;
+
+	if (!sgl)
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		avail = sg_dma_len(sg);
+		if (avail > DMA_MAX_SIZE)
+			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+	}
+
+	ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+	if (!ds) {
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+		return NULL;
+	}
+	ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+	ds->desc_num = num;
+	num = 0;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+		total += avail;
+
+		do {
+			len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+			if (dir == DMA_MEM_TO_DEV) {
+				src = addr;
+				dst = c->dev_addr;
+			} else if (dir == DMA_DEV_TO_MEM) {
+				src = c->dev_addr;
+				dst = addr;
+			}
+
+			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	ds->desc_hw[num-1].lli = 0;	/* end of link */
+	ds->size = total;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct dma_slave_config *cfg = (void *)arg;
+	struct k3_dma_phy *p = c->phy;
+	unsigned long flags;
+	u32 maxburst = 0, val = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+	LIST_HEAD(head);
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		if (cfg == NULL)
+			return -EINVAL;
+		c->dir = cfg->direction;
+		if (c->dir == DMA_DEV_TO_MEM) {
+			c->ccfg = CX_CFG_DSTINCR;
+			c->dev_addr = cfg->src_addr;
+			maxburst = cfg->src_maxburst;
+			width = cfg->src_addr_width;
+		} else if (c->dir == DMA_MEM_TO_DEV) {
+			c->ccfg = CX_CFG_SRCINCR;
+			c->dev_addr = cfg->dst_addr;
+			maxburst = cfg->dst_maxburst;
+			width = cfg->dst_addr_width;
+		}
+		switch (width) {
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		case DMA_SLAVE_BUSWIDTH_8_BYTES:
+			val =  __ffs(width);
+			break;
+		default:
+			val = 3;
+			break;
+		}
+		c->ccfg |= (val << 12) | (val << 16);
+
+		if ((maxburst == 0) || (maxburst > 16))
+			val = 16;
+		else
+			val = maxburst - 1;
+		c->ccfg |= (val << 20) | (val << 24);
+		c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+		/* specific request line */
+		c->ccfg |= c->vc.chan.chan_id << 4;
+		break;
+
+	case DMA_TERMINATE_ALL:
+		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+		/* Prevent this channel being scheduled */
+		spin_lock(&d->lock);
+		list_del_init(&c->node);
+		spin_unlock(&d->lock);
+
+		/* Clear the tx descriptor lists */
+		spin_lock_irqsave(&c->vc.lock, flags);
+		vchan_get_all_descriptors(&c->vc, &head);
+		if (p) {
+			/* vchan is assigned to a pchan - stop the channel */
+			k3_dma_terminate_chan(p, d);
+			c->phy = NULL;
+			p->vchan = NULL;
+			p->ds_run = p->ds_done = NULL;
+		}
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+		vchan_dma_desc_free_list(&c->vc, &head);
+		break;
+
+	case DMA_PAUSE:
+		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+		if (c->status == DMA_IN_PROGRESS) {
+			c->status = DMA_PAUSED;
+			if (p) {
+				k3_dma_pause_dma(p, false);
+			} else {
+				spin_lock(&d->lock);
+				list_del_init(&c->node);
+				spin_unlock(&d->lock);
+			}
+		}
+		break;
+
+	case DMA_RESUME:
+		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+		spin_lock_irqsave(&c->vc.lock, flags);
+		if (c->status == DMA_PAUSED) {
+			c->status = DMA_IN_PROGRESS;
+			if (p) {
+				k3_dma_pause_dma(p, true);
+			} else if (!list_empty(&c->vc.desc_issued)) {
+				spin_lock(&d->lock);
+				list_add_tail(&c->node, &d->chan_pending);
+				spin_unlock(&d->lock);
+			}
+		}
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+		break;
+	default:
+		return -ENXIO;
+	}
+	return 0;
+}
+
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct k3_dma_desc_sw *ds =
+		container_of(vd, struct k3_dma_desc_sw, vd);
+
+	kfree(ds);
+}
+
+static const struct of_device_id k3_pdma_dt_ids[] = {
+	{ .compatible = "hisilicon,k3-dma-1.0", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
+
+static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+						struct of_dma *ofdma)
+{
+	struct k3_dma_dev *d = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= d->dma_requests)
+		return NULL;
+
+	return dma_get_slave_channel(&(d->chans[request].vc.chan));
+}
+
+static int k3_dma_probe(struct platform_device *op)
+{
+	struct k3_dma_dev *d;
+	const struct of_device_id *of_id;
+	struct resource *iores;
+	int i, ret, irq = 0;
+
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->base = devm_ioremap_resource(&op->dev, iores);
+	if (IS_ERR(d->base))
+		return PTR_ERR(d->base);
+
+	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
+	if (of_id) {
+		of_property_read_u32((&op->dev)->of_node,
+				"dma-channels", &d->dma_channels);
+		of_property_read_u32((&op->dev)->of_node,
+				"dma-requests", &d->dma_requests);
+	}
+
+	d->clk = devm_clk_get(&op->dev, NULL);
+	if (IS_ERR(d->clk)) {
+		dev_err(&op->dev, "no dma clk\n");
+		return PTR_ERR(d->clk);
+	}
+
+	irq = platform_get_irq(op, 0);
+	ret = devm_request_irq(&op->dev, irq,
+			k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+	if (ret)
+		return ret;
+
+	/* init phy channel */
+	d->phy = devm_kzalloc(&op->dev,
+		d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
+	if (d->phy == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_channels; i++) {
+		struct k3_dma_phy *p = &d->phy[i];
+
+		p->idx = i;
+		p->base = d->base + i * 0x40;
+	}
+
+	INIT_LIST_HEAD(&d->slave.channels);
+	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+	d->slave.dev = &op->dev;
+	d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
+	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
+	d->slave.device_tx_status = k3_dma_tx_status;
+	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
+	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+	d->slave.device_issue_pending = k3_dma_issue_pending;
+	d->slave.device_control = k3_dma_control;
+	d->slave.copy_align = DMA_ALIGN;
+	d->slave.chancnt = d->dma_requests;
+
+	/* init virtual channel */
+	d->chans = devm_kzalloc(&op->dev,
+		d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
+	if (d->chans == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_requests; i++) {
+		struct k3_dma_chan *c = &d->chans[i];
+
+		c->status = DMA_IN_PROGRESS;
+		INIT_LIST_HEAD(&c->node);
+		c->vc.desc_free = k3_dma_free_desc;
+		vchan_init(&c->vc, &d->slave);
+	}
+
+	/* Enable clock before accessing registers */
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+		return ret;
+	}
+
+	/* initialize internal state before interrupts are unmasked */
+	spin_lock_init(&d->lock);
+	INIT_LIST_HEAD(&d->chan_pending);
+	tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+
+	k3_dma_enable_dma(d, true);
+
+	ret = dma_async_device_register(&d->slave);
+	if (ret)
+		return ret;
+
+	ret = of_dma_controller_register((&op->dev)->of_node,
+					k3_of_dma_simple_xlate, d);
+	if (ret)
+		goto of_dma_register_fail;
+
+	platform_set_drvdata(op, d);
+	dev_info(&op->dev, "initialized\n");
+
+	return 0;
+
+of_dma_register_fail:
+	dma_async_device_unregister(&d->slave);
+	return ret;
+}
+
+static int k3_dma_remove(struct platform_device *op)
+{
+	struct k3_dma_chan *c, *cn;
+	struct k3_dma_dev *d = platform_get_drvdata(op);
+
+	dma_async_device_unregister(&d->slave);
+	of_dma_controller_free((&op->dev)->of_node);
+
+	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+	}
+	tasklet_kill(&d->task);
+	clk_disable_unprepare(d->clk);
+	return 0;
+}
+
+static int k3_dma_suspend(struct device *dev)
+{
+	struct k3_dma_dev *d = dev_get_drvdata(dev);
+	u32 stat = 0;
+
+	stat = k3_dma_get_chan_stat(d);
+	if (stat) {
+		dev_warn(d->slave.dev,
+			"chan %d is running fail to suspend\n", stat);
+		return -EBUSY;
+	}
+	k3_dma_enable_dma(d, false);
+	clk_disable_unprepare(d->clk);
+	return 0;
+}
+
+static int k3_dma_resume(struct device *dev)
+{
+	struct k3_dma_dev *d = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+		return ret;
+	}
+	k3_dma_enable_dma(d, true);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+
+static struct platform_driver k3_pdma_driver = {
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner  = THIS_MODULE,
+		.pm	= &k3_dma_pmops,
+		.of_match_table = k3_pdma_dt_ids,
+	},
+	.probe		= k3_dma_probe,
+	.remove		= k3_dma_remove,
+};
+
+module_platform_driver(k3_pdma_driver);
+
+MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_ALIAS("platform:k3dma");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c26699f..ff8d7827 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,9 @@
 #include <linux/platform_data/mmp_dma.h>
 #include <linux/dmapool.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
 
 #include "dmaengine.h"
 
@@ -47,6 +49,8 @@
 #define DCSR_CMPST	(1 << 10)       /* The Descriptor Compare Status */
 #define DCSR_EORINTR	(1 << 9)        /* The end of Receive */
 
+#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
+				 (((n) & 0x3f) << 2))
 #define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
 #define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
 
@@ -69,7 +73,7 @@
 #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
 
 #define PDMA_ALIGNMENT		3
-#define PDMA_MAX_DESC_BYTES	0x1000
+#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
 	u32 ddadr;	/* Points to the next descriptor + flags */
@@ -94,6 +98,9 @@
 	struct mmp_pdma_phy *phy;
 	enum dma_transfer_direction dir;
 
+	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
+						 * is in cyclic mode */
+
 	/* channel's basic info */
 	struct tasklet_struct tasklet;
 	u32 dcmd;
@@ -105,6 +112,7 @@
 	struct list_head chain_pending;	/* Link descriptors queue for pending */
 	struct list_head chain_running;	/* Link descriptors queue for running */
 	bool idle;			/* channel state machine */
+	bool byte_align;
 
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 };
@@ -121,6 +129,7 @@
 	struct device			*dev;
 	struct dma_device		device;
 	struct mmp_pdma_phy		*phy;
+	spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
 #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -137,15 +146,21 @@
 
 static void enable_chan(struct mmp_pdma_phy *phy)
 {
-	u32 reg;
+	u32 reg, dalgn;
 
 	if (!phy->vchan)
 		return;
 
-	reg = phy->vchan->drcmr;
-	reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+	reg = DRCMR(phy->vchan->drcmr);
 	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 
+	dalgn = readl(phy->base + DALGN);
+	if (phy->vchan->byte_align)
+		dalgn |= 1 << phy->idx;
+	else
+		dalgn &= ~(1 << phy->idx);
+	writel(dalgn, phy->base + DALGN);
+
 	reg = (phy->idx << 2) + DCSR;
 	writel(readl(phy->base + reg) | DCSR_RUN,
 					phy->base + reg);
@@ -218,7 +233,8 @@
 {
 	int prio, i;
 	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
-	struct mmp_pdma_phy *phy;
+	struct mmp_pdma_phy *phy, *found = NULL;
+	unsigned long flags;
 
 	/*
 	 * dma channel priorities
@@ -227,6 +243,8 @@
 	 * ch 8 - 11, 24 - 27  <--> (2)
 	 * ch 12 - 15, 28 - 31  <--> (3)
 	 */
+
+	spin_lock_irqsave(&pdev->phy_lock, flags);
 	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
 		for (i = 0; i < pdev->dma_channels; i++) {
 			if (prio != ((i & 0xf) >> 2))
@@ -234,31 +252,34 @@
 			phy = &pdev->phy[i];
 			if (!phy->vchan) {
 				phy->vchan = pchan;
-				return phy;
+				found = phy;
+				goto out_unlock;
 			}
 		}
 	}
 
-	return NULL;
+out_unlock:
+	spin_unlock_irqrestore(&pdev->phy_lock, flags);
+	return found;
 }
 
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
-					struct mmp_pdma_desc_sw *desc)
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
 {
-	struct mmp_pdma_desc_sw *tail =
-				to_mmp_pdma_desc(chan->chain_pending.prev);
+	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+	unsigned long flags;
+	u32 reg;
 
-	if (list_empty(&chan->chain_pending))
-		goto out_splice;
+	if (!pchan->phy)
+		return;
 
-	/* one irq per queue, even appended */
-	tail->desc.ddadr = desc->async_tx.phys;
-	tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+	/* clear the channel mapping in DRCMR */
+	reg = DRCMR(pchan->phy->vchan->drcmr);
+	writel(0, pchan->phy->base + reg);
 
-	/* softly link to pending list */
-out_splice:
-	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+	spin_lock_irqsave(&pdev->phy_lock, flags);
+	pchan->phy->vchan = NULL;
+	pchan->phy = NULL;
+	spin_unlock_irqrestore(&pdev->phy_lock, flags);
 }
 
 /**
@@ -277,10 +298,7 @@
 
 	if (list_empty(&chan->chain_pending)) {
 		/* chance to re-fetch phy channel with higher prio */
-		if (chan->phy) {
-			chan->phy->vchan = NULL;
-			chan->phy = NULL;
-		}
+		mmp_pdma_free_phy(chan);
 		dev_dbg(chan->dev, "no pending list\n");
 		return;
 	}
@@ -326,14 +344,16 @@
 		cookie = dma_cookie_assign(&child->async_tx);
 	}
 
-	append_pending_queue(chan, desc);
+	/* softly link to pending list - desc->tx_list ==> pending list */
+	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
 
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
 	return cookie;
 }
 
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
 {
 	struct mmp_pdma_desc_sw *desc;
 	dma_addr_t pdesc;
@@ -377,10 +397,7 @@
 		dev_err(chan->dev, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
 	}
-	if (chan->phy) {
-		chan->phy->vchan = NULL;
-		chan->phy = NULL;
-	}
+	mmp_pdma_free_phy(chan);
 	chan->idle = true;
 	chan->dev_addr = 0;
 	return 1;
@@ -411,10 +428,7 @@
 	chan->desc_pool = NULL;
 	chan->idle = true;
 	chan->dev_addr = 0;
-	if (chan->phy) {
-		chan->phy->vchan = NULL;
-		chan->phy = NULL;
-	}
+	mmp_pdma_free_phy(chan);
 	return;
 }
 
@@ -434,6 +448,7 @@
 		return NULL;
 
 	chan = to_mmp_pdma_chan(dchan);
+	chan->byte_align = false;
 
 	if (!chan->dir) {
 		chan->dir = DMA_MEM_TO_MEM;
@@ -450,6 +465,8 @@
 		}
 
 		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+		if (dma_src & 0x7 || dma_dst & 0x7)
+			chan->byte_align = true;
 
 		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
 		new->desc.dsadr = dma_src;
@@ -486,6 +503,8 @@
 	new->desc.ddadr = DDADR_STOP;
 	new->desc.dcmd |= DCMD_ENDIRQEN;
 
+	chan->cyclic_first = NULL;
+
 	return &first->async_tx;
 
 fail:
@@ -509,12 +528,16 @@
 	if ((sgl == NULL) || (sg_len == 0))
 		return NULL;
 
+	chan->byte_align = false;
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		addr = sg_dma_address(sg);
 		avail = sg_dma_len(sg);
 
 		do {
 			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+			if (addr & 0x7)
+				chan->byte_align = true;
 
 			/* allocate and populate the descriptor */
 			new = mmp_pdma_alloc_descriptor(chan);
@@ -557,6 +580,94 @@
 	new->desc.ddadr = DDADR_STOP;
 	new->desc.dcmd |= DCMD_ENDIRQEN;
 
+	chan->dir = dir;
+	chan->cyclic_first = NULL;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
+
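+/*
+ * Cyclic transfers build a closed descriptor ring: every descriptor sets
+ * DCMD_ENDIRQEN so each completed period raises an interrupt, and the
+ * last descriptor's ddadr points back at the first.
+ */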
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+	struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct mmp_pdma_chan *chan;
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+	dma_addr_t dma_src, dma_dst;
+
+	if (!dchan || !len || !period_len)
+		return NULL;
+
+	/* the buffer length must be a multiple of period_len */
+	if (len % period_len != 0)
+		return NULL;
+
+	if (period_len > PDMA_MAX_DESC_BYTES)
+		return NULL;
+
+	chan = to_mmp_pdma_chan(dchan);
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		dma_src = buf_addr;
+		dma_dst = chan->dev_addr;
+		break;
+	case DMA_DEV_TO_MEM:
+		dma_dst = buf_addr;
+		dma_src = chan->dev_addr;
+		break;
+	default:
+		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+		return NULL;
+	}
+
+	chan->dir = direction;
+
+	do {
+		/* Allocate the link descriptor from DMA pool */
+		new = mmp_pdma_alloc_descriptor(chan);
+		if (!new) {
+			dev_err(chan->dev, "no memory for desc\n");
+			goto fail;
+		}
+
+		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+					(DCMD_LENGTH & period_len);
+		new->desc.dsadr = dma_src;
+		new->desc.dtadr = dma_dst;
+
+		if (!first)
+			first = new;
+		else
+			prev->desc.ddadr = new->async_tx.phys;
+
+		new->async_tx.cookie = 0;
+		async_tx_ack(&new->async_tx);
+
+		prev = new;
+		len -= period_len;
+
+		if (chan->dir == DMA_MEM_TO_DEV)
+			dma_src += period_len;
+		else
+			dma_dst += period_len;
+
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	first->async_tx.flags = flags; /* client is in control of this ack */
+	first->async_tx.cookie = -EBUSY;
+
+	/* make the cyclic link */
+	new->desc.ddadr = first->async_tx.phys;
+	chan->cyclic_first = first;
+
 	return &first->async_tx;
 
 fail:
@@ -581,10 +692,7 @@
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
 		disable_chan(chan->phy);
-		if (chan->phy) {
-			chan->phy->vchan = NULL;
-			chan->phy = NULL;
-		}
+		mmp_pdma_free_phy(chan);
 		spin_lock_irqsave(&chan->desc_lock, flags);
 		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
 		mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -619,8 +727,13 @@
 			chan->dcmd |= DCMD_BURST32;
 
 		chan->dir = cfg->direction;
-		chan->drcmr = cfg->slave_id;
 		chan->dev_addr = addr;
+		/* FIXME: drivers should be ported over to use the filter
+		 * function. Once that's done, the following two lines can
+		 * be removed.
+		 */
+		if (cfg->slave_id)
+			chan->drcmr = cfg->slave_id;
 		break;
 	default:
 		return -ENOSYS;
@@ -632,15 +745,7 @@
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
 			dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
-	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
-	enum dma_status ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-	ret = dma_cookie_status(dchan, cookie, txstate);
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-	return ret;
+	return dma_cookie_status(dchan, cookie, txstate);
 }
 
 /**
@@ -669,29 +774,51 @@
 	LIST_HEAD(chain_cleanup);
 	unsigned long flags;
 
-	/* submit pending list; callback for each desc; free desc */
+	if (chan->cyclic_first) {
+		dma_async_tx_callback cb = NULL;
+		void *cb_data = NULL;
 
+		spin_lock_irqsave(&chan->desc_lock, flags);
+		desc = chan->cyclic_first;
+		cb = desc->async_tx.callback;
+		cb_data = desc->async_tx.callback_param;
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+		if (cb)
+			cb(cb_data);
+
+		return;
+	}
+
+	/* submit pending list; callback for each desc; free desc */
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	/* update the cookie if we have some descriptors to cleanup */
-	if (!list_empty(&chan->chain_running)) {
-		dma_cookie_t cookie;
+	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+		/*
+		 * move the descriptors to a temporary list so we can drop
+		 * the lock during the entire cleanup operation
+		 */
+		list_del(&desc->node);
+		list_add(&desc->node, &chain_cleanup);
 
-		desc = to_mmp_pdma_desc(chan->chain_running.prev);
-		cookie = desc->async_tx.cookie;
-		dma_cookie_complete(&desc->async_tx);
-
-		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+		/*
+		 * Look for the first list entry which has the ENDIRQEN flag
+		 * set. That is the descriptor we got an interrupt for, so
+		 * complete that transaction and its cookie.
+		 */
+		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+			dma_cookie_t cookie = desc->async_tx.cookie;
+			dma_cookie_complete(&desc->async_tx);
+			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+			break;
+		}
 	}
 
 	/*
-	 * move the descriptors to a temporary list so we can drop the lock
-	 * during the entire cleanup operation
+	 * The hardware is idle and ready for more when the
+	 * chain_running list is empty.
 	 */
-	list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
-	/* the hardware is now idle and ready for more */
-	chan->idle = true;
+	chan->idle = list_empty(&chan->chain_running);
 
 	/* Start any pending transactions automatically */
 	start_pending_queue(chan);
@@ -763,6 +890,39 @@
 };
 MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
 
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct mmp_pdma_device *d = ofdma->of_dma_data;
+	struct dma_chan *chan, *candidate;
+
+retry:
+	candidate = NULL;
+
+	/* walk the list of channels registered with the current instance and
+	 * find one that is currently unused */
+	list_for_each_entry(chan, &d->device.channels, device_node)
+		if (chan->client_count == 0) {
+			candidate = chan;
+			break;
+		}
+
+	if (!candidate)
+		return NULL;
+
+	/* dma_get_slave_channel will return NULL if we lost a race between
+	 * the lookup and the reservation */
+	chan = dma_get_slave_channel(candidate);
+
+	if (chan) {
+		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+		c->drcmr = dma_spec->args[0];
+		return chan;
+	}
+
+	goto retry;
+}
+
 static int mmp_pdma_probe(struct platform_device *op)
 {
 	struct mmp_pdma_device *pdev;
@@ -777,10 +937,9 @@
 		return -ENOMEM;
 	pdev->dev = &op->dev;
 
-	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
-	if (!iores)
-		return -EINVAL;
+	spin_lock_init(&pdev->phy_lock);
 
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
 	pdev->base = devm_ioremap_resource(pdev->dev, iores);
 	if (IS_ERR(pdev->base))
 		return PTR_ERR(pdev->base);
@@ -825,13 +984,15 @@
 
 	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
-	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
 	pdev->device.dev = &op->dev;
 	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
 	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
 	pdev->device.device_tx_status = mmp_pdma_tx_status;
 	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
 	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
 	pdev->device.device_control = mmp_pdma_control;
 	pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -847,7 +1008,17 @@
 		return ret;
 	}
 
-	dev_info(pdev->device.dev, "initialized\n");
+	if (op->dev.of_node) {
+		/* Device-tree DMA controller registration */
+		ret = of_dma_controller_register(op->dev.of_node,
+						 mmp_pdma_dma_xlate, pdev);
+		if (ret < 0) {
+			dev_err(&op->dev, "of_dma_controller_register failed\n");
+			return ret;
+		}
+	}
+
+	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
 	return 0;
 }
 
@@ -867,6 +1038,19 @@
 	.remove		= mmp_pdma_remove,
 };
 
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+		return false;
+
+	c->drcmr = *(unsigned int *) param;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
 module_platform_driver(mmp_pdma_driver);
 
 MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 9b93665..38cb517 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -460,7 +460,8 @@
 {
 	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 
-	dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+			 tdmac->buf_len - tdmac->pos);
 
 	return tdmac->status;
 }
@@ -549,9 +550,6 @@
 	}
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iores)
-		return -EINVAL;
-
 	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
 	if (IS_ERR(tdev->base))
 		return PTR_ERR(tdev->base);
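
The switch from dma_set_residue() to dma_set_tx_state() matters because the
former fills in only the residue, leaving the completed/used cookies untouched.
For reference, the dmaengine.h helper is roughly:

	static inline void dma_set_tx_state(struct dma_tx_state *st,
			dma_cookie_t last, dma_cookie_t used, u32 residue)
	{
		if (st) {
			st->last = last;
			st->used = used;
			st->residue = residue;
		}
	}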
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2d95673..2fe4353 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -556,15 +556,7 @@
 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	       struct dma_tx_state *txstate)
 {
-	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
-	enum dma_status ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mchan->lock, flags);
-	ret = dma_cookie_status(chan, cookie, txstate);
-	spin_unlock_irqrestore(&mchan->lock, flags);
-
-	return ret;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 /* Prepare descriptor for memory to memory copy */
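
Dropping the lock around dma_cookie_status() (here, and in the pch_dma and
mmp_pdma hunks) is safe because the helper only reads the channel's cookie
counters and touches no driver state; its core is roughly:

	static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *state)
	{
		dma_cookie_t used, complete;

		used = chan->cookie;
		complete = chan->completed_cookie;
		barrier();
		if (state) {
			state->last = complete;
			state->used = used;
			state->residue = 0;
		}
		return dma_async_is_complete(cookie, complete, used);
	}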
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3..536dcb8 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -64,7 +64,7 @@
 				int src_idx)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_src_addr[src_idx];
+	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
 }
 
 
@@ -107,32 +107,32 @@
 				 int index, dma_addr_t addr)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	hw_desc->phy_src_addr[index] = addr;
+	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
 	if (desc->type == DMA_XOR)
 		hw_desc->desc_command |= (1 << index);
 }
 
 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
 {
-	return __raw_readl(XOR_CURR_DESC(chan));
+	return readl_relaxed(XOR_CURR_DESC(chan));
 }
 
 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 					u32 next_desc_addr)
 {
-	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
-	u32 val = __raw_readl(XOR_INTR_MASK(chan));
+	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_MASK(chan));
+	writel_relaxed(val, XOR_INTR_MASK(chan));
 }
 
 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 {
-	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 	return intr_cause;
 }
@@ -149,13 +149,13 @@
 {
 	u32 val = ~(1 << (chan->idx * 16));
 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
 {
 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@
 			       enum dma_transaction_type type)
 {
 	u32 op_mode;
-	u32 config = __raw_readl(XOR_CONFIG(chan));
+	u32 config = readl_relaxed(XOR_CONFIG(chan));
 
 	switch (type) {
 	case DMA_XOR:
@@ -192,7 +192,14 @@
 
 	config &= ~0x7;
 	config |= op_mode;
-	__raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+	config |= XOR_DESCRIPTOR_SWAP;
+#else
+	config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+	writel_relaxed(config, XOR_CONFIG(chan));
 	chan->current_type = type;
 }
 
@@ -201,14 +208,14 @@
 	u32 activation;
 
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = __raw_readl(XOR_ACTIVATION(chan));
+	activation = readl_relaxed(XOR_ACTIVATION(chan));
 	activation |= 0x1;
-	__raw_writel(activation, XOR_ACTIVATION(chan));
+	writel_relaxed(activation, XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
 {
-	u32 state = __raw_readl(XOR_ACTIVATION(chan));
+	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
 
 	state = (state >> 4) & 0x3;
 
@@ -647,7 +654,7 @@
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p\n",
-		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
 
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
@@ -755,22 +762,22 @@
 {
 	u32 val;
 
-	val = __raw_readl(XOR_CONFIG(chan));
+	val = readl_relaxed(XOR_CONFIG(chan));
 	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
 
-	val = __raw_readl(XOR_ACTIVATION(chan));
+	val = readl_relaxed(XOR_ACTIVATION(chan));
 	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_CAUSE(chan));
+	val = readl_relaxed(XOR_INTR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_MASK(chan));
+	val = readl_relaxed(XOR_INTR_MASK(chan));
 	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_CAUSE(chan));
+	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_ADDR(chan));
+	val = readl_relaxed(XOR_ERROR_ADDR(chan));
 	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
 }
 
@@ -1029,10 +1036,8 @@
 	struct dma_device *dma_dev;
 
 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
-	if (!mv_chan) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}
+	if (!mv_chan)
+		return ERR_PTR(-ENOMEM);
 
 	mv_chan->idx = idx;
 	mv_chan->irq = irq;
@@ -1166,7 +1171,7 @@
 {
 	const struct mbus_dram_target_info *dram;
 	struct mv_xor_device *xordev;
-	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct resource *res;
 	int i, ret;
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c619359..06b067f 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -29,8 +29,10 @@
 #define MV_XOR_THRESHOLD		1
 #define MV_XOR_MAX_CHANNELS             2
 
+/* Values for the XOR_CONFIG register */
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
+#define XOR_DESCRIPTOR_SWAP		BIT(14)
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -143,7 +145,16 @@
 #endif
 };
 
-/* This structure describes XOR descriptor size 64bytes	*/
+/*
+ * This structure describes XOR descriptor size 64bytes. The
+ * mv_phy_src_idx() macro must be used when indexing the values of the
+ * phy_src_addr[] array. This is due to the fact that the 'descriptor
+ * swap' feature, used on big endian systems, swaps descriptors data
+ * within blocks of 8 bytes. So two consecutive values of the
+ * phy_src_addr[] array are actually swapped in big-endian, which
+ * explains the different mv_phy_src_idx() implementation.
+ */
+#if defined(__LITTLE_ENDIAN)
 struct mv_xor_desc {
 	u32 status;		/* descriptor execution status */
 	u32 crc32_result;	/* result of CRC-32 calculation */
@@ -155,6 +166,21 @@
 	u32 reserved0;
 	u32 reserved1;
 };
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+	u32 crc32_result;	/* result of CRC-32 calculation */
+	u32 status;		/* descriptor execution status */
+	u32 phy_next_desc;	/* next descriptor address pointer */
+	u32 desc_command;	/* type of operation to be carried out */
+	u32 phy_dest_addr;	/* destination block address */
+	u32 byte_count;		/* size of src/dst blocks in bytes */
+	u32 phy_src_addr[8];	/* source block addresses */
+	u32 reserved1;
+	u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
 
 #define to_mv_sw_desc(addr_hw_desc)		\
 	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 7195930..ccd13df 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -23,7 +23,6 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
 #include <linux/stmp_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -197,24 +196,6 @@
 	return container_of(chan, struct mxs_dma_chan, chan);
 }
 
-int mxs_dma_is_apbh(struct dma_chan *chan)
-{
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
-	return dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
-
-int mxs_dma_is_apbx(struct dma_chan *chan)
-{
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
-	return !dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
-
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -349,13 +330,9 @@
 static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-	struct mxs_dma_data *data = chan->private;
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int ret;
 
-	if (data)
-		mxs_chan->chan_irq = data->chan_irq;
-
 	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
 				CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
 				GFP_KERNEL);
@@ -622,10 +599,8 @@
 			dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-	dma_cookie_t last_used;
 
-	last_used = chan->cookie;
-	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
+	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
 
 	return mxs_chan->status;
 }
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 75334bd..0b88dd3 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -160,7 +160,8 @@
 
 	count = of_property_count_strings(np, "dma-names");
 	if (count < 0) {
-		pr_err("%s: dma-names property missing or empty\n", __func__);
+		pr_err("%s: dma-names property of node '%s' missing or empty\n",
+			__func__, np->full_name);
 		return NULL;
 	}
 
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 0bbdea5..61fdc54 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -564,14 +564,7 @@
 static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 				    struct dma_tx_state *txstate)
 {
-	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
-	enum dma_status ret;
-
-	spin_lock_irq(&pd_chan->lock);
-	ret = dma_cookie_status(chan, cookie, txstate);
-	spin_unlock_irq(&pd_chan->lock);
-
-	return ret;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static void pd_issue_pending(struct dma_chan *chan)
@@ -1036,3 +1029,4 @@
 		   "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa645d8..a562d24 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -545,6 +545,8 @@
 
 	/* List of to be xfered descriptors */
 	struct list_head work_list;
+	/* List of completed descriptors */
+	struct list_head completed_list;
 
 	/* Pointer to the DMAC that manages this channel,
 	 * NULL if the channel is available to be acquired.
@@ -2198,66 +2200,6 @@
 	return container_of(tx, struct dma_pl330_desc, txd);
 }
 
-static inline void free_desc_list(struct list_head *list)
-{
-	struct dma_pl330_dmac *pdmac;
-	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch = NULL;
-	unsigned long flags;
-
-	/* Finish off the work list */
-	list_for_each_entry(desc, list, node) {
-		dma_async_tx_callback callback;
-		void *param;
-
-		/* All desc in a list belong to same channel */
-		pch = desc->pchan;
-		callback = desc->txd.callback;
-		param = desc->txd.callback_param;
-
-		if (callback)
-			callback(param);
-
-		desc->pchan = NULL;
-	}
-
-	/* pch will be unset if list was empty */
-	if (!pch)
-		return;
-
-	pdmac = pch->dmac;
-
-	spin_lock_irqsave(&pdmac->pool_lock, flags);
-	list_splice_tail_init(list, &pdmac->desc_pool);
-	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
-	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch = NULL;
-	unsigned long flags;
-
-	list_for_each_entry(desc, list, node) {
-		dma_async_tx_callback callback;
-
-		/* Change status to reload it */
-		desc->status = PREP;
-		pch = desc->pchan;
-		callback = desc->txd.callback;
-		if (callback)
-			callback(desc->txd.callback_param);
-	}
-
-	/* pch will be unset if list was empty */
-	if (!pch)
-		return;
-
-	spin_lock_irqsave(&pch->lock, flags);
-	list_splice_tail_init(list, &pch->work_list);
-	spin_unlock_irqrestore(&pch->lock, flags);
-}
-
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
 	struct dma_pl330_desc *desc;
@@ -2291,7 +2233,6 @@
 	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
 	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
-	LIST_HEAD(list);
 
 	spin_lock_irqsave(&pch->lock, flags);
 
@@ -2300,7 +2241,7 @@
 		if (desc->status == DONE) {
 			if (!pch->cyclic)
 				dma_cookie_complete(&desc->txd);
-			list_move_tail(&desc->node, &list);
+			list_move_tail(&desc->node, &pch->completed_list);
 		}
 
 	/* Try to submit a req imm. next to the last completed cookie */
@@ -2309,12 +2250,31 @@
 	/* Make sure the PL330 Channel thread is active */
 	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	while (!list_empty(&pch->completed_list)) {
+		dma_async_tx_callback callback;
+		void *callback_param;
 
-	if (pch->cyclic)
-		handle_cyclic_desc_list(&list);
-	else
-		free_desc_list(&list);
+		desc = list_first_entry(&pch->completed_list,
+					struct dma_pl330_desc, node);
+
+		callback = desc->txd.callback;
+		callback_param = desc->txd.callback_param;
+
+		if (pch->cyclic) {
+			desc->status = PREP;
+			list_move_tail(&desc->node, &pch->work_list);
+		} else {
+			desc->status = FREE;
+			list_move_tail(&desc->node, &pch->dmac->desc_pool);
+		}
+
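+		/*
+		 * Drop the channel lock around the callback: a callback may
+		 * submit new work against this channel and would otherwise
+		 * deadlock on pch->lock.
+		 */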
+		if (callback) {
+			spin_unlock_irqrestore(&pch->lock, flags);
+			callback(callback_param);
+			spin_lock_irqsave(&pch->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&pch->lock, flags);
 }
 
 static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -2409,7 +2369,7 @@
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc, *_dt;
+	struct dma_pl330_desc *desc;
 	unsigned long flags;
 	struct dma_pl330_dmac *pdmac = pch->dmac;
 	struct dma_slave_config *slave_config;
@@ -2423,12 +2383,18 @@
 		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
 		/* Mark all desc done */
-		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
-			desc->status = DONE;
-			list_move_tail(&desc->node, &list);
+		list_for_each_entry(desc, &pch->work_list , node) {
+			desc->status = FREE;
+			dma_cookie_complete(&desc->txd);
 		}
 
-		list_splice_tail_init(&list, &pdmac->desc_pool);
+		list_for_each_entry(desc, &pch->completed_list , node) {
+			desc->status = FREE;
+			dma_cookie_complete(&desc->txd);
+		}
+
+		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
 		spin_unlock_irqrestore(&pch->lock, flags);
 		break;
 	case DMA_SLAVE_CONFIG:
@@ -2814,6 +2780,28 @@
 	return &desc->txd;
 }
 
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+				  struct dma_pl330_desc *first)
+{
+	unsigned long flags;
+	struct dma_pl330_desc *desc;
+
+	if (!first)
+		return;
+
+	spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+	while (!list_empty(&first->node)) {
+		desc = list_entry(first->node.next,
+				struct dma_pl330_desc, node);
+		list_move_tail(&desc->node, &pdmac->desc_pool);
+	}
+
+	list_move_tail(&first->node, &pdmac->desc_pool);
+
+	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2822,7 +2810,6 @@
 	struct dma_pl330_desc *first, *desc = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct scatterlist *sg;
-	unsigned long flags;
 	int i;
 	dma_addr_t addr;
 
@@ -2842,20 +2829,7 @@
 			dev_err(pch->dmac->pif.dev,
 				"%s:%d Unable to fetch desc\n",
 				__func__, __LINE__);
-			if (!first)
-				return NULL;
-
-			spin_lock_irqsave(&pdmac->pool_lock, flags);
-
-			while (!list_empty(&first->node)) {
-				desc = list_entry(first->node.next,
-						struct dma_pl330_desc, node);
-				list_move_tail(&desc->node, &pdmac->desc_pool);
-			}
-
-			list_move_tail(&first->node, &pdmac->desc_pool);
-
-			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+			__pl330_giveback_desc(pdmac, first);
 
 			return NULL;
 		}
@@ -2896,6 +2870,25 @@
 		return IRQ_NONE;
 }
 
+#define PL330_DMA_BUSWIDTHS \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+	struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = false;
+	caps->cmd_terminate = true;
+
+	return 0;
+}
+
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
@@ -2908,7 +2901,7 @@
 	int i, ret, irq;
 	int num_chan;
 
-	pdat = adev->dev.platform_data;
+	pdat = dev_get_platdata(&adev->dev);
 
 	/* Allocate a new DMAC and its Channels */
 	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -2971,6 +2964,7 @@
 			pch->chan.private = adev->dev.of_node;
 
 		INIT_LIST_HEAD(&pch->work_list);
+		INIT_LIST_HEAD(&pch->completed_list);
 		spin_lock_init(&pch->lock);
 		pch->pl330_chid = NULL;
 		pch->chan.device = pd;
@@ -3000,6 +2994,7 @@
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
 	pd->device_control = pl330_control;
 	pd->device_issue_pending = pl330_issue_pending;
+	pd->device_slave_caps = pl330_dma_device_slave_caps;
 
 	ret = dma_async_device_register(pd);
 	if (ret) {
@@ -3015,6 +3010,14 @@
 			"unable to register DMA to the generic DT DMA helpers\n");
 		}
 	}
+	/*
+	 * This is the limit for transfers with a buswidth of 1, larger
+	 * buswidths will have larger limits.
+	 */
+	ret = dma_set_max_seg_size(&adev->dev, 1900800);
+	if (ret)
+		dev_err(&adev->dev, "unable to set the seg size\n");
+
 
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 5c1dee2..dadd9e01 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -22,3 +22,13 @@
 	depends on SH_DMAE_BASE
 	help
 	  Enable support for the Renesas SUDMAC controllers.
+
+config RCAR_HPB_DMAE
+	tristate "Renesas R-Car HPB DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car series DMA controllers.
+
+config SHDMA_R8A73A4
+	def_bool y
+	depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c962138..e856af2 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,9 @@
 obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
+shdma-y := shdmac.o
+ifeq ($(CONFIG_OF),y)
+shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
+endif
+shdma-objs := $(shdma-y)
 obj-$(CONFIG_SUDMAC) += sudmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
new file mode 100644
index 0000000..45a5202
--- /dev/null
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This file is based on the drivers/dma/sh/shdma.c
+ *
+ * Renesas SuperH DMA Engine support
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - The DMAC has no hardware DMA chain mode.
+ * - The maximum DMA transfer size is 16 MB.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/shdma-base.h>
+#include <linux/slab.h>
+
+/* DMA channel registers */
+#define HPB_DMAE_DSAR0	0x00
+#define HPB_DMAE_DDAR0	0x04
+#define HPB_DMAE_DTCR0	0x08
+#define HPB_DMAE_DSAR1	0x0C
+#define HPB_DMAE_DDAR1	0x10
+#define HPB_DMAE_DTCR1	0x14
+#define HPB_DMAE_DSASR	0x18
+#define HPB_DMAE_DDASR	0x1C
+#define HPB_DMAE_DTCSR	0x20
+#define HPB_DMAE_DPTR	0x24
+#define HPB_DMAE_DCR	0x28
+#define HPB_DMAE_DCMDR	0x2C
+#define HPB_DMAE_DSTPR	0x30
+#define HPB_DMAE_DSTSR	0x34
+#define HPB_DMAE_DDBGR	0x38
+#define HPB_DMAE_DDBGR2	0x3C
+#define HPB_DMAE_CHAN(n)	(0x40 * (n))
+
+/* DMA command register (DCMDR) bits */
+#define HPB_DMAE_DCMDR_BDOUT	BIT(7)
+#define HPB_DMAE_DCMDR_DQSPD	BIT(6)
+#define HPB_DMAE_DCMDR_DQSPC	BIT(5)
+#define HPB_DMAE_DCMDR_DMSPD	BIT(4)
+#define HPB_DMAE_DCMDR_DMSPC	BIT(3)
+#define HPB_DMAE_DCMDR_DQEND	BIT(2)
+#define HPB_DMAE_DCMDR_DNXT	BIT(1)
+#define HPB_DMAE_DCMDR_DMEN	BIT(0)
+
+/* DMA forced stop register (DSTPR) bits */
+#define HPB_DMAE_DSTPR_DMSTP	BIT(0)
+
+/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DMSTS	BIT(0)
+
+/* DMA common registers */
+#define HPB_DMAE_DTIMR		0x00
+#define HPB_DMAE_DINTSR0		0x0C
+#define HPB_DMAE_DINTSR1		0x10
+#define HPB_DMAE_DINTCR0		0x14
+#define HPB_DMAE_DINTCR1		0x18
+#define HPB_DMAE_DINTMR0		0x1C
+#define HPB_DMAE_DINTMR1		0x20
+#define HPB_DMAE_DACTSR0		0x24
+#define HPB_DMAE_DACTSR1		0x28
+#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4)
+#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4)
+#define HPB_DMAE_HPB_DMLVLR0	0x160
+#define HPB_DMAE_HPB_DMLVLR1	0x164
+#define HPB_DMAE_HPB_DMSHPT0	0x168
+#define HPB_DMAE_HPB_DMSHPT1	0x16C
+
+#define HPB_DMA_SLAVE_NUMBER 256
+#define HPB_DMA_TCR_MAX 0x01000000	/* 16 MiB */
+
+struct hpb_dmae_chan {
+	struct shdma_chan shdma_chan;
+	int xfer_mode;			/* DMA transfer mode */
+#define XFER_SINGLE	1
+#define XFER_DOUBLE	2
+	unsigned plane_idx;		/* current DMA information set */
+	bool first_desc;		/* first/next transfer */
+	int xmit_shift;			/* log_2(bytes_per_xfer) */
+	void __iomem *base;
+	const struct hpb_dmae_slave_config *cfg;
+	char dev_id[16];		/* unique name per DMAC of channel */
+};
+
+struct hpb_dmae_device {
+	struct shdma_dev shdma_dev;
+	spinlock_t reg_lock;		/* comm_reg operation lock */
+	struct hpb_dmae_pdata *pdata;
+	void __iomem *chan_reg;
+	void __iomem *comm_reg;
+	void __iomem *reset_reg;
+	void __iomem *mode_reg;
+};
+
+struct hpb_dmae_regs {
+	u32 sar; /* SAR / source address */
+	u32 dar; /* DAR / destination address */
+	u32 tcr; /* TCR / transfer count */
+};
+
+struct hpb_desc {
+	struct shdma_desc shdma_desc;
+	struct hpb_dmae_regs hw;
+	unsigned plane_idx;
+};
+
+#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
+#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+				struct hpb_dmae_device, shdma_dev.dma_dev)
+
+static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
+{
+	iowrite32(data, hpb_dc->base + reg);
+}
+
+static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
+{
+	return ioread32(hpb_dc->base + reg);
+}
+
+static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
+}
+
+static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
+}
+
+static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+	u32 v;
+
+	if (ch < 32)
+		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
+	else
+		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
+	return v & 0x1;
+}
+
+static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+	if (ch < 32)
+		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
+	else
+		iowrite32((0x1 << (ch - 32)),
+			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
+}
+
+static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+	iowrite32(data, hpbdev->mode_reg);
+}
+
+static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
+{
+	return ioread32(hpbdev->mode_reg);
+}
+
+static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+	u32 intreg;
+
+	spin_lock_irq(&hpbdev->reg_lock);
+	if (ch < 32) {
+		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+		iowrite32(BIT(ch) | intreg,
+			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+	} else {
+		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+		iowrite32(BIT(ch - 32) | intreg,
+			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+	}
+	spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
+{
+	u32 rstr;
+	int timeout = 10000;	/* 100 ms */
+
+	spin_lock(&hpbdev->reg_lock);
+	rstr = ioread32(hpbdev->reset_reg);
+	rstr |= data;
+	iowrite32(rstr, hpbdev->reset_reg);
+	do {
+		rstr = ioread32(hpbdev->reset_reg);
+		if ((rstr & data) == data)
+			break;
+		udelay(10);
+	} while (timeout--);
+
+	if (timeout < 0)
+		dev_err(hpbdev->shdma_dev.dma_dev.dev,
+			"%s timeout\n", __func__);
+
+	rstr &= ~data;
+	iowrite32(rstr, hpbdev->reset_reg);
+	spin_unlock(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
+				    u32 mask, u32 data)
+{
+	u32 mode;
+
+	spin_lock_irq(&hpbdev->reg_lock);
+	mode = asyncmdr_read(hpbdev);
+	mode &= ~mask;
+	mode |= data;
+	asyncmdr_write(hpbdev, mode);
+	spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
+{
+	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
+}
+
+static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
+{
+	u32 ch;
+
+	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
+		hsrstr_write(hpbdev, ch);
+}
+
+static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
+{
+	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
+	int i;
+
+	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
+	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
+	default:
+		i = XMIT_SZ_8BIT;
+		break;
+	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
+		i = XMIT_SZ_16BIT;
+		break;
+	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
+		i = XMIT_SZ_32BIT;
+		break;
+	}
+	return pdata->ts_shift[i];
+}
+
+static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
+			     struct hpb_dmae_regs *hw, unsigned plane)
+{
+	ch_reg_write(hpb_chan, hw->sar,
+		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
+	ch_reg_write(hpb_chan, hw->dar,
+		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
+	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
+		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+}
+
+static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
+{
+	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
+		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
+}
+
+static void hpb_dmae_halt(struct shdma_chan *schan)
+{
+	struct hpb_dmae_chan *chan = to_chan(schan);
+
+	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
+	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+}
+
+static const struct hpb_dmae_slave_config *
+hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
+{
+	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+	int i;
+
+	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0; i < pdata->num_slaves; i++)
+		if (pdata->slaves[i].id == slave_id)
+			return pdata->slaves + i;
+
+	return NULL;
+}
+
+static void hpb_dmae_start_xfer(struct shdma_chan *schan,
+				struct shdma_desc *sdesc)
+{
+	struct hpb_dmae_chan *chan = to_chan(schan);
+	struct hpb_dmae_device *hpbdev = to_dev(chan);
+	struct hpb_desc *desc = to_desc(sdesc);
+
+	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
+		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
+
+	desc->plane_idx = chan->plane_idx;
+	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
+	hpb_dmae_start(chan, !chan->first_desc);
+
+	if (chan->xfer_mode == XFER_DOUBLE) {
+		chan->plane_idx ^= 1;
+		chan->first_desc = false;
+	}
+}
+
+static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
+				    struct shdma_desc *sdesc)
+{
+	/*
+	 * This is correct since we always have at most a single
+	 * outstanding DMA transfer per channel, and by the time
+	 * we get completion interrupt the transfer is completed.
+	 * This will change if we ever use alternating DMA
+	 * information sets and submit two descriptors at once.
+	 */
+	return true;
+}
+
+static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+	struct hpb_dmae_chan *chan = to_chan(schan);
+	struct hpb_dmae_device *hpbdev = to_dev(chan);
+	int ch = chan->cfg->dma_ch;
+
+	/* Check Complete DMA Transfer */
+	if (dintsr_read(hpbdev, ch)) {
+		/* Clear Interrupt status */
+		dintcr_write(hpbdev, ch);
+		return true;
+	}
+	return false;
+}
+
+static int hpb_dmae_desc_setup(struct shdma_chan *schan,
+			       struct shdma_desc *sdesc,
+			       dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+	struct hpb_desc *desc = to_desc(sdesc);
+
+	if (*len > (size_t)HPB_DMA_TCR_MAX)
+		*len = (size_t)HPB_DMA_TCR_MAX;
+
+	desc->hw.sar = src;
+	desc->hw.dar = dst;
+	desc->hw.tcr = *len;
+
+	return 0;
+}
+
+static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
+				   struct shdma_desc *sdesc)
+{
+	struct hpb_desc *desc = to_desc(sdesc);
+	struct hpb_dmae_chan *chan = to_chan(schan);
+	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
+			      HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+
+	return (desc->hw.tcr - tcr) << chan->xmit_shift;
+}
+
+static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
+{
+	struct hpb_dmae_chan *chan = to_chan(schan);
+	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
+
+	return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+}
+
+static int
+hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
+			      const struct hpb_dmae_slave_config *cfg)
+{
+	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+	const struct hpb_dmae_channel *channel = pdata->channels;
+	int slave_id = cfg->id;
+	int i, err;
+
+	for (i = 0; i < pdata->num_channels; i++, channel++) {
+		if (channel->s_id == slave_id) {
+			struct device *dev = hpb_chan->shdma_chan.dev;
+
+			hpb_chan->base = hpbdev->chan_reg +
+				HPB_DMAE_CHAN(cfg->dma_ch);
+
+			dev_dbg(dev, "Detected Slave device\n");
+			dev_dbg(dev, " -- slave_id       : 0x%x\n", slave_id);
+			dev_dbg(dev, " -- cfg->dma_ch    : %d\n", cfg->dma_ch);
+			dev_dbg(dev, " -- channel->ch_irq: %d\n",
+				channel->ch_irq);
+			break;
+		}
+	}
+
+	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
+				IRQF_SHARED, hpb_chan->dev_id);
+	if (err) {
+		dev_err(hpb_chan->shdma_chan.dev,
+			"DMA channel request_irq %d failed with error %d\n",
+			channel->ch_irq, err);
+		return err;
+	}
+
+	hpb_chan->plane_idx = 0;
+	hpb_chan->first_desc = true;
+
+	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
+		hpb_chan->xfer_mode = XFER_SINGLE;
+	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
+		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
+		hpb_chan->xfer_mode = XFER_DOUBLE;
+	} else {
+		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
+		shdma_free_irq(&hpb_chan->shdma_chan);
+		return -EINVAL;
+	}
+
+	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
+		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
+	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
+	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
+	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
+	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
+
+	return 0;
+}
+
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+	struct hpb_dmae_chan *chan = to_chan(schan);
+	const struct hpb_dmae_slave_config *sc =
+		hpb_dmae_find_slave(chan, slave_id);
+
+	if (!sc)
+		return -ENODEV;
+	if (try)
+		return 0;
+	chan->cfg = sc;
+	return hpb_dmae_alloc_chan_resources(chan, sc);
+}
+
+static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
+{
+	struct hpb_dmae_chan *chan = to_chan(schan);
+
+	return chan->cfg->addr;
+}
+
+static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
+{
+	return &((struct hpb_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops hpb_dmae_ops = {
+	.desc_completed = hpb_dmae_desc_completed,
+	.halt_channel = hpb_dmae_halt,
+	.channel_busy = hpb_dmae_channel_busy,
+	.slave_addr = hpb_dmae_slave_addr,
+	.desc_setup = hpb_dmae_desc_setup,
+	.set_slave = hpb_dmae_set_slave,
+	.setup_xfer = hpb_dmae_setup_xfer,
+	.start_xfer = hpb_dmae_start_xfer,
+	.embedded_desc = hpb_dmae_embedded_desc,
+	.chan_irq = hpb_dmae_chan_irq,
+	.get_partial = hpb_dmae_get_partial,
+};
+
+static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
+{
+	struct shdma_dev *sdev = &hpbdev->shdma_dev;
+	struct platform_device *pdev =
+		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
+	struct hpb_dmae_chan *new_hpb_chan;
+	struct shdma_chan *schan;
+
+	/* Alloc channel */
+	new_hpb_chan = devm_kzalloc(&pdev->dev,
+				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
+	if (!new_hpb_chan) {
+		dev_err(hpbdev->shdma_dev.dma_dev.dev,
+			"No free memory for allocating DMA channels!\n");
+		return -ENOMEM;
+	}
+
+	schan = &new_hpb_chan->shdma_chan;
+	shdma_chan_probe(sdev, schan, id);
+
+	if (pdev->id >= 0)
+		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+			 "hpb-dmae%d.%d", pdev->id, id);
+	else
+		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+			 "hpb-dma.%d", id);
+
+	return 0;
+}
+
+static int hpb_dmae_probe(struct platform_device *pdev)
+{
+	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
+	struct hpb_dmae_device *hpbdev;
+	struct dma_device *dma_dev;
+	struct resource *chan, *comm, *rest, *mode, *irq_res;
+	int err, i;
+
+	/* Get platform data */
+	if (!pdata || !pdata->num_channels)
+		return -ENODEV;
+
+	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res)
+		return -ENODEV;
+
+	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
+			      GFP_KERNEL);
+	if (!hpbdev) {
+		dev_err(&pdev->dev, "Not enough memory\n");
+		return -ENOMEM;
+	}
+
+	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+	if (IS_ERR(hpbdev->chan_reg))
+		return PTR_ERR(hpbdev->chan_reg);
+
+	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
+	if (IS_ERR(hpbdev->comm_reg))
+		return PTR_ERR(hpbdev->comm_reg);
+
+	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
+	if (IS_ERR(hpbdev->reset_reg))
+		return PTR_ERR(hpbdev->reset_reg);
+
+	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
+	if (IS_ERR(hpbdev->mode_reg))
+		return PTR_ERR(hpbdev->mode_reg);
+
+	dma_dev = &hpbdev->shdma_dev.dma_dev;
+
+	spin_lock_init(&hpbdev->reg_lock);
+
+	/* Platform data */
+	hpbdev->pdata = pdata;
+
+	pm_runtime_enable(&pdev->dev);
+	err = pm_runtime_get_sync(&pdev->dev);
+	if (err < 0)
+		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+	/* Reset DMA controller */
+	hpb_dmae_reset(hpbdev);
+
+	pm_runtime_put(&pdev->dev);
+
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
+	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
+	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
+	if (err < 0)
+		goto error;
+
+	/* Create DMA channels */
+	for (i = 0; i < pdata->num_channels; i++)
+		hpb_dmae_chan_probe(hpbdev, i);
+
+	platform_set_drvdata(pdev, hpbdev);
+	err = dma_async_device_register(dma_dev);
+	if (!err)
+		return 0;
+
+	shdma_cleanup(&hpbdev->shdma_dev);
+error:
+	pm_runtime_disable(&pdev->dev);
+	return err;
+}
+
+static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
+{
+	struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
+	struct shdma_chan *schan;
+	int i;
+
+	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
+		BUG_ON(!schan);
+
+		shdma_free_irq(schan);
+		shdma_chan_remove(schan);
+	}
+	dma_dev->chancnt = 0;
+}
+
+static int hpb_dmae_remove(struct platform_device *pdev)
+{
+	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	hpb_dmae_chan_remove(hpbdev);
+
+	return 0;
+}
+
+static void hpb_dmae_shutdown(struct platform_device *pdev)
+{
+	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+	hpb_dmae_ctl_stop(hpbdev);
+}
+
+static struct platform_driver hpb_dmae_driver = {
+	.probe		= hpb_dmae_probe,
+	.remove		= hpb_dmae_remove,
+	.shutdown	= hpb_dmae_shutdown,
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "hpb-dma-engine",
+	},
+};
+module_platform_driver(hpb_dmae_driver);
+
+MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
+MODULE_LICENSE("GPL");
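
The to_chan()/to_desc()/to_dev() macros above are instances of the kernel's container_of() idiom: the driver-private structure embeds the generic shdma one, and the wrapper is recovered by subtracting the member offset. A minimal, userspace-compilable sketch of the same pattern (struct fields trimmed to the essentials):

	#include <stddef.h>

	/* Simplified container_of(): recover the outer struct from a
	 * pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct shdma_chan { int id; };

	struct hpb_dmae_chan {
		struct shdma_chan shdma_chan;	/* embedded base object */
		int xfer_mode;
	};

	static struct hpb_dmae_chan *to_chan_sketch(struct shdma_chan *schan)
	{
		return container_of(schan, struct hpb_dmae_chan, shdma_chan);
	}
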
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
new file mode 100644
index 0000000..a2b8258
--- /dev/null
+++ b/drivers/dma/sh/shdma-arm.h
@@ -0,0 +1,51 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the Free
+ * Software Foundation.
+ */
+
+#ifndef SHDMA_ARM_H
+#define SHDMA_ARM_H
+
+#include "shdma.h"
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+	XMIT_SZ_8BIT		= 0,
+	XMIT_SZ_16BIT		= 1,
+	XMIT_SZ_32BIT		= 2,
+	XMIT_SZ_64BIT		= 7,
+	XMIT_SZ_128BIT		= 3,
+	XMIT_SZ_256BIT		= 4,
+	XMIT_SZ_512BIT		= 5,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define SH_DMAE_TS_SHIFT {		\
+	[XMIT_SZ_8BIT]		= 0,	\
+	[XMIT_SZ_16BIT]		= 1,	\
+	[XMIT_SZ_32BIT]		= 2,	\
+	[XMIT_SZ_64BIT]		= 3,	\
+	[XMIT_SZ_128BIT]	= 4,	\
+	[XMIT_SZ_256BIT]	= 5,	\
+	[XMIT_SZ_512BIT]	= 6,	\
+}
+
+#define TS_LOW_BIT	0x3 /* --xx */
+#define TS_HI_BIT	0xc /* xx-- */
+
+#define TS_LOW_SHIFT	(3)
+#define TS_HI_SHIFT	(20 - 2)	/* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+	 (((i) & TS_HI_BIT)  << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+
+#endif
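
TS_INDEX2VAL() splits the transfer-size index across two non-adjacent CHCR bit fields: the two low bits land in CHCR[4:3] and the two high bits in CHCR[21:20]. A standalone sketch reproducing the arithmetic (XMIT_SZ_64BIT is the interesting case, since its index of 7 exercises both fields):

	#include <stdio.h>

	#define TS_LOW_BIT	0x3
	#define TS_HI_BIT	0xc
	#define TS_LOW_SHIFT	3
	#define TS_HI_SHIFT	(20 - 2)	/* low field already shifted 2 bits out */

	#define TS_INDEX2VAL(i) \
		((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) | \
		 (((i) & TS_HI_BIT)  << TS_HI_SHIFT))

	int main(void)
	{
		/* index 7 (XMIT_SZ_64BIT): 0x3 << 3 = 0x18,
		 * 0x4 << 18 = 0x100000 -> combined 0x100018 */
		printf("TS_INDEX2VAL(7) = 0x%x\n", TS_INDEX2VAL(7));
		return 0;
	}
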
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 28ca361..d94ab59 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,7 +171,8 @@
 	return NULL;
 }
 
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
+			     dma_addr_t slave_addr)
 {
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
@@ -179,7 +180,7 @@
 
 	if (schan->dev->of_node) {
 		match = schan->hw_req;
-		ret = ops->set_slave(schan, match, true);
+		ret = ops->set_slave(schan, match, slave_addr, true);
 		if (ret < 0)
 			return ret;
 
@@ -194,7 +195,7 @@
 	if (test_and_set_bit(slave_id, shdma_slave_used))
 		return -EBUSY;
 
-	ret = ops->set_slave(schan, match, false);
+	ret = ops->set_slave(schan, match, slave_addr, false);
 	if (ret < 0) {
 		clear_bit(slave_id, shdma_slave_used);
 		return ret;
@@ -236,7 +237,7 @@
 	if (!schan->dev->of_node && match >= slave_num)
 		return false;
 
-	ret = ops->set_slave(schan, match, true);
+	ret = ops->set_slave(schan, match, 0, true);
 	if (ret < 0)
 		return false;
 
@@ -259,7 +260,7 @@
 	 */
 	if (slave) {
 		/* Legacy mode: .private is set in filter */
-		ret = shdma_setup_slave(schan, slave->slave_id);
+		ret = shdma_setup_slave(schan, slave->slave_id, 0);
 		if (ret < 0)
 			goto esetslave;
 	} else {
@@ -680,7 +681,9 @@
 		 * channel, while using it...
 		 */
 		config = (struct dma_slave_config *)arg;
-		ret = shdma_setup_slave(schan, config->slave_id);
+		ret = shdma_setup_slave(schan, config->slave_id,
+					config->direction == DMA_DEV_TO_MEM ?
+					config->src_addr : config->dst_addr);
 		if (ret < 0)
 			return ret;
 		break;
@@ -831,8 +834,8 @@
 int shdma_request_irq(struct shdma_chan *schan, int irq,
 			   unsigned long flags, const char *name)
 {
-	int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
-				       flags, name, schan);
+	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
+					    chan_irqt, flags, name, schan);
 
 	schan->irq = ret < 0 ? ret : irq;
 
@@ -840,13 +843,6 @@
 }
 EXPORT_SYMBOL(shdma_request_irq);
 
-void shdma_free_irq(struct shdma_chan *schan)
-{
-	if (schan->irq >= 0)
-		free_irq(schan->irq, schan);
-}
-EXPORT_SYMBOL(shdma_free_irq);
-
 void shdma_chan_probe(struct shdma_dev *sdev,
 			   struct shdma_chan *schan, int id)
 {
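
The switch to devm_request_threaded_irq() is what lets shdma_free_irq() disappear: a device-managed IRQ is released automatically when the device is unbound. A hedged sketch of the idiom (probe and handler names hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		/* Freed automatically on driver unbind - no free_irq()
		 * needed in the remove path. */
		return devm_request_threaded_irq(&pdev->dev, irq,
						 example_hardirq,
						 example_threadfn,
						 IRQF_SHARED,
						 dev_name(&pdev->dev),
						 platform_get_drvdata(pdev));
	}
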
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 11bcb05..06473a0 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -42,12 +42,9 @@
 
 static int shdma_of_probe(struct platform_device *pdev)
 {
-	const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+	const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
 	int ret;
 
-	if (!lookup)
-		return -EINVAL;
-
 	ret = of_dma_controller_register(pdev->dev.of_node,
 					 shdma_of_xlate, pdev);
 	if (ret < 0)
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
new file mode 100644
index 0000000..4fb9997
--- /dev/null
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -0,0 +1,77 @@
+/*
+ * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the Free
+ * Software Foundation.
+ */
+#include <linux/sh_dma.h>
+
+#include "shdma-arm.h"
+
+const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT;
+
+static const struct sh_dmae_slave_config dma_slaves[] = {
+	{
+		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
+		.mid_rid	= 0xd1,		/* MMC0 Tx */
+	}, {
+		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
+		.mid_rid	= 0xd2,		/* MMC0 Rx */
+	}, {
+		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
+		.mid_rid	= 0xe1,		/* MMC1 Tx */
+	}, {
+		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
+		.mid_rid	= 0xe2,		/* MMC1 Rx */
+	},
+};
+
+#define DMAE_CHANNEL(a, b)				\
+	{						\
+		.offset         = (a) - 0x20,		\
+		.dmars          = (a) - 0x20 + 0x40,	\
+		.chclr_bit	= (b),			\
+		.chclr_offset	= 0x80 - 0x20,		\
+	}
+
+static const struct sh_dmae_channel dma_channels[] = {
+	DMAE_CHANNEL(0x8000, 0),
+	DMAE_CHANNEL(0x8080, 1),
+	DMAE_CHANNEL(0x8100, 2),
+	DMAE_CHANNEL(0x8180, 3),
+	DMAE_CHANNEL(0x8200, 4),
+	DMAE_CHANNEL(0x8280, 5),
+	DMAE_CHANNEL(0x8300, 6),
+	DMAE_CHANNEL(0x8380, 7),
+	DMAE_CHANNEL(0x8400, 8),
+	DMAE_CHANNEL(0x8480, 9),
+	DMAE_CHANNEL(0x8500, 10),
+	DMAE_CHANNEL(0x8580, 11),
+	DMAE_CHANNEL(0x8600, 12),
+	DMAE_CHANNEL(0x8680, 13),
+	DMAE_CHANNEL(0x8700, 14),
+	DMAE_CHANNEL(0x8780, 15),
+	DMAE_CHANNEL(0x8800, 16),
+	DMAE_CHANNEL(0x8880, 17),
+	DMAE_CHANNEL(0x8900, 18),
+	DMAE_CHANNEL(0x8980, 19),
+};
+
+const struct sh_dmae_pdata r8a73a4_dma_pdata = {
+	.slave		= dma_slaves,
+	.slave_num	= ARRAY_SIZE(dma_slaves),
+	.channel	= dma_channels,
+	.channel_num	= ARRAY_SIZE(dma_channels),
+	.ts_low_shift	= TS_LOW_SHIFT,
+	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT,
+	.ts_high_shift	= TS_HI_SHIFT,
+	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT,
+	.ts_shift	= dma_ts_shift,
+	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift),
+	.dmaor_init     = DMAOR_DME,
+	.chclr_present	= 1,
+	.chclr_bitwise	= 1,
+};
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9314e93..758a57b 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -28,18 +28,19 @@
 	struct shdma_chan shdma_chan;
 	const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
 	int xmit_shift;			/* log_2(bytes_per_xfer) */
-	u32 __iomem *base;
+	void __iomem *base;
 	char dev_id[16];		/* unique name per DMAC of channel */
 	int pm_error;
+	dma_addr_t slave_addr;
 };
 
 struct sh_dmae_device {
 	struct shdma_dev shdma_dev;
 	struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
-	struct sh_dmae_pdata *pdata;
+	const struct sh_dmae_pdata *pdata;
 	struct list_head node;
-	u32 __iomem *chan_reg;
-	u16 __iomem *dmars;
+	void __iomem *chan_reg;
+	void __iomem *dmars;
 	unsigned int chcr_offset;
 	u32 chcr_ie_bit;
 };
@@ -61,4 +62,11 @@
 #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
 				     struct sh_dmae_device, shdma_dev.dma_dev)
 
+#ifdef CONFIG_SHDMA_R8A73A4
+extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
+#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
+#else
+#define r8a73a4_shdma_devid NULL
+#endif
+
 #endif	/* __DMA_SHDMA_H */
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdmac.c
similarity index 88%
rename from drivers/dma/sh/shdma.c
rename to drivers/dma/sh/shdmac.c
index 5039fbc..1069e88 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdmac.c
@@ -20,6 +20,8 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
@@ -35,6 +37,15 @@
 #include "../dmaengine.h"
 #include "shdma.h"
 
+/* DMA register */
+#define SAR	0x00
+#define DAR	0x04
+#define TCR	0x08
+#define CHCR	0x0C
+#define DMAOR	0x40
+
+#define TEND	0x18 /* USB-DMAC */
+
 #define SH_DMAE_DRV_NAME "sh-dma-engine"
 
 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
@@ -49,27 +60,37 @@
 static DEFINE_SPINLOCK(sh_dmae_lock);
 static LIST_HEAD(sh_dmae_devices);
 
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ *     channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit
+ *     corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+		sh_dc->shdma_chan.id;
+	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
 
-	__raw_writel(data, shdev->chan_reg +
-		     shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
 }
 
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
+	__raw_writel(data, sh_dc->base + reg);
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return __raw_readl(sh_dc->base + reg / sizeof(u32));
+	return __raw_readl(sh_dc->base + reg);
 }
 
 static u16 dmaor_read(struct sh_dmae_device *shdev)
 {
-	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+	void __iomem *addr = shdev->chan_reg + DMAOR;
 
 	if (shdev->pdata->dmaor_is_32bit)
 		return __raw_readl(addr);
@@ -79,7 +100,7 @@
 
 static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
 {
-	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+	void __iomem *addr = shdev->chan_reg + DMAOR;
 
 	if (shdev->pdata->dmaor_is_32bit)
 		__raw_writel(data, addr);
@@ -91,14 +112,14 @@
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
 
-	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
 }
 
 static u32 chcr_read(struct sh_dmae_chan *sh_dc)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
 
-	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+	return __raw_readl(sh_dc->base + shdev->chcr_offset);
 }
 
 /*
@@ -133,7 +154,7 @@
 		for (i = 0; i < shdev->pdata->channel_num; i++) {
 			struct sh_dmae_chan *sh_chan = shdev->chan[i];
 			if (sh_chan)
-				chclr_write(sh_chan, 0);
+				channel_clear(sh_chan);
 		}
 	}
 
@@ -167,7 +188,7 @@
 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-	struct sh_dmae_pdata *pdata = shdev->pdata;
+	const struct sh_dmae_pdata *pdata = shdev->pdata;
 	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
 		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
 
@@ -180,7 +201,7 @@
 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-	struct sh_dmae_pdata *pdata = shdev->pdata;
+	const struct sh_dmae_pdata *pdata = shdev->pdata;
 	int i;
 
 	for (i = 0; i < pdata->ts_shift_num; i++)
@@ -240,9 +261,9 @@
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-	struct sh_dmae_pdata *pdata = shdev->pdata;
+	const struct sh_dmae_pdata *pdata = shdev->pdata;
 	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
-	u16 __iomem *addr = shdev->dmars;
+	void __iomem *addr = shdev->dmars;
 	unsigned int shift = chan_pdata->dmars_bit;
 
 	if (dmae_is_busy(sh_chan))
@@ -253,8 +274,8 @@
 
 	/* in the case of a missing DMARS resource use first memory window */
 	if (!addr)
-		addr = (u16 __iomem *)shdev->chan_reg;
-	addr += chan_pdata->dmars / sizeof(u16);
+		addr = shdev->chan_reg;
+	addr += chan_pdata->dmars;
 
 	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
 		     addr);
@@ -309,7 +330,7 @@
 	struct sh_dmae_chan *sh_chan, int match)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-	struct sh_dmae_pdata *pdata = shdev->pdata;
+	const struct sh_dmae_pdata *pdata = shdev->pdata;
 	const struct sh_dmae_slave_config *cfg;
 	int i;
 
@@ -323,7 +344,7 @@
 	} else {
 		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
 			if (cfg->mid_rid == match) {
-				sh_chan->shdma_chan.slave_id = cfg->slave_id;
+				sh_chan->shdma_chan.slave_id = i;
 				return cfg;
 			}
 	}
@@ -332,7 +353,7 @@
 }
 
 static int sh_dmae_set_slave(struct shdma_chan *schan,
-			     int slave_id, bool try)
+			     int slave_id, dma_addr_t slave_addr, bool try)
 {
 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
 						    shdma_chan);
@@ -340,8 +361,10 @@
 	if (!cfg)
 		return -ENXIO;
 
-	if (!try)
+	if (!try) {
 		sh_chan->config = cfg;
+		sh_chan->slave_addr = slave_addr ? : cfg->addr;
+	}
 
 	return 0;
 }
@@ -505,7 +528,8 @@
 	struct shdma_chan *schan;
 	int err;
 
-	sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+			       GFP_KERNEL);
 	if (!sh_chan) {
 		dev_err(sdev->dma_dev.dev,
 			"No free memory for allocating dma channels!\n");
@@ -517,7 +541,7 @@
 
 	shdma_chan_probe(sdev, schan, id);
 
-	sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+	sh_chan->base = shdev->chan_reg + chan_pdata->offset;
 
 	/* set up channel irq */
 	if (pdev->id >= 0)
@@ -541,7 +565,6 @@
 err_no_irq:
 	/* remove from dmaengine device node */
 	shdma_chan_remove(schan);
-	kfree(sh_chan);
 	return err;
 }
 
@@ -552,14 +575,9 @@
 	int i;
 
 	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
-		struct sh_dmae_chan *sh_chan = container_of(schan,
-					struct sh_dmae_chan, shdma_chan);
 		BUG_ON(!schan);
 
-		shdma_free_irq(&sh_chan->shdma_chan);
-
 		shdma_chan_remove(schan);
-		kfree(sh_chan);
 	}
 	dma_dev->chancnt = 0;
 }
@@ -636,7 +654,7 @@
 	 * This is an exclusive slave DMA operation, may only be called after a
 	 * successful slave configuration.
 	 */
-	return sh_chan->config->addr;
+	return sh_chan->slave_addr;
 }
 
 static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
@@ -658,9 +676,15 @@
 	.get_partial = sh_dmae_get_partial,
 };
 
+static const struct of_device_id sh_dmae_of_match[] = {
+	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
+	{}
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
 static int sh_dmae_probe(struct platform_device *pdev)
 {
-	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+	const struct sh_dmae_pdata *pdata;
 	unsigned long irqflags = IRQF_DISABLED,
 		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
 	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -669,6 +693,11 @@
 	struct dma_device *dma_dev;
 	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
 
+	if (pdev->dev.of_node)
+		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+	else
+		pdata = dev_get_platdata(&pdev->dev);
+
 	/* get platform data */
 	if (!pdata || !pdata->channel_num)
 		return -ENODEV;
@@ -696,33 +725,22 @@
 	if (!chan || !errirq_res)
 		return -ENODEV;
 
-	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
-		dev_err(&pdev->dev, "DMAC register region already claimed\n");
-		return -EBUSY;
-	}
-
-	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
-		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
-		err = -EBUSY;
-		goto ermrdmars;
-	}
-
-	err = -ENOMEM;
-	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+			     GFP_KERNEL);
 	if (!shdev) {
 		dev_err(&pdev->dev, "Not enough memory\n");
-		goto ealloc;
+		return -ENOMEM;
 	}
 
 	dma_dev = &shdev->shdma_dev.dma_dev;
 
-	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
-	if (!shdev->chan_reg)
-		goto emapchan;
+	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+	if (IS_ERR(shdev->chan_reg))
+		return PTR_ERR(shdev->chan_reg);
 	if (dmars) {
-		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
-		if (!shdev->dmars)
-			goto emapdmars;
+		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+		if (IS_ERR(shdev->dmars))
+			return PTR_ERR(shdev->dmars);
 	}
 
 	if (!pdata->slave_only)
@@ -783,8 +801,8 @@
 
 	errirq = errirq_res->start;
 
-	err = request_irq(errirq, sh_dmae_err, irqflags,
-			  "DMAC Address Error", shdev);
+	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+			       "DMAC Address Error", shdev);
 	if (err) {
 		dev_err(&pdev->dev,
 			"DMA failed requesting irq #%d, error %d\n",
@@ -862,7 +880,6 @@
 	sh_dmae_chan_remove(shdev);
 
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-	free_irq(errirq, shdev);
 eirq_err:
 #endif
 rst_err:
@@ -873,21 +890,9 @@
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	platform_set_drvdata(pdev, NULL);
 	shdma_cleanup(&shdev->shdma_dev);
 eshdma:
-	if (dmars)
-		iounmap(shdev->dmars);
-emapdmars:
-	iounmap(shdev->chan_reg);
 	synchronize_rcu();
-emapchan:
-	kfree(shdev);
-ealloc:
-	if (dmars)
-		release_mem_region(dmars->start, resource_size(dmars));
-ermrdmars:
-	release_mem_region(chan->start, resource_size(chan));
 
 	return err;
 }
@@ -896,14 +901,9 @@
 {
 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
 	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
-	struct resource *res;
-	int errirq = platform_get_irq(pdev, 0);
 
 	dma_async_device_unregister(dma_dev);
 
-	if (errirq > 0)
-		free_irq(errirq, shdev);
-
 	spin_lock_irq(&sh_dmae_lock);
 	list_del_rcu(&shdev->node);
 	spin_unlock_irq(&sh_dmae_lock);
@@ -913,31 +913,11 @@
 	sh_dmae_chan_remove(shdev);
 	shdma_cleanup(&shdev->shdma_dev);
 
-	if (shdev->dmars)
-		iounmap(shdev->dmars);
-	iounmap(shdev->chan_reg);
-
-	platform_set_drvdata(pdev, NULL);
-
 	synchronize_rcu();
-	kfree(shdev);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
 
 	return 0;
 }
 
-static const struct of_device_id sh_dmae_of_match[] = {
-	{ .compatible = "renesas,shdma", },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
 static struct platform_driver sh_dmae_driver = {
 	.driver 	= {
 		.owner	= THIS_MODULE,
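
The probe path now picks its pdata from either the matched of_device_id's .data (device-tree boot) or the legacy platform data. A condensed sketch of that selection (function name hypothetical; error handling for an unmatched OF node omitted):

	static const struct sh_dmae_pdata *
	example_get_pdata(struct platform_device *pdev)
	{
		if (pdev->dev.of_node)	/* instantiated from the device tree */
			return of_match_device(sh_dmae_of_match,
					       &pdev->dev)->data;
		return dev_get_platdata(&pdev->dev);
	}
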
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index e7c94bb..c7e9cdf 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -150,7 +150,8 @@
 	return NULL;
 }
 
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
+			    dma_addr_t slave_addr, bool try)
 {
 	struct sudmac_chan *sc = to_chan(schan);
 	const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
@@ -298,11 +299,8 @@
 	int i;
 
 	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
-		struct sudmac_chan *sc = to_chan(schan);
-
 		BUG_ON(!schan);
 
-		shdma_free_irq(&sc->shdma_chan);
 		shdma_chan_remove(schan);
 	}
 	dma_dev->chancnt = 0;
@@ -335,7 +333,7 @@
 
 static int sudmac_probe(struct platform_device *pdev)
 {
-	struct sudmac_pdata *pdata = pdev->dev.platform_data;
+	struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
 	int err, i;
 	struct sudmac_device *su_dev;
 	struct dma_device *dma_dev;
@@ -345,9 +343,8 @@
 	if (!pdata)
 		return -ENODEV;
 
-	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!chan || !irq_res)
+	if (!irq_res)
 		return -ENODEV;
 
 	err = -ENOMEM;
@@ -360,9 +357,10 @@
 
 	dma_dev = &su_dev->shdma_dev.dma_dev;
 
-	su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
-	if (!su_dev->chan_reg)
-		return err;
+	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+	if (IS_ERR(su_dev->chan_reg))
+		return PTR_ERR(su_dev->chan_reg);
 
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
 
@@ -373,7 +371,7 @@
 		return err;
 
 	/* platform data */
-	su_dev->pdata = pdev->dev.platform_data;
+	su_dev->pdata = dev_get_platdata(&pdev->dev);
 
 	platform_set_drvdata(pdev, su_dev);
 
@@ -393,7 +391,6 @@
 chan_probe_err:
 	sudmac_chan_remove(su_dev);
 
-	platform_set_drvdata(pdev, NULL);
 	shdma_cleanup(&su_dev->shdma_dev);
 
 	return err;
@@ -407,7 +404,6 @@
 	dma_async_device_unregister(dma_dev);
 	sudmac_chan_remove(su_dev);
 	shdma_cleanup(&su_dev->shdma_dev);
-	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
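
Unlike the removed devm_request_and_ioremap(), devm_ioremap_resource() reports failure through an ERR_PTR() value, never NULL, so callers must test with IS_ERR(). A sketch of the resulting idiom (function name hypothetical):

	static int example_map(struct platform_device *pdev,
			       void __iomem **base)
	{
		struct resource *res;

		/* devm_ioremap_resource() also copes with res == NULL by
		 * returning ERR_PTR(-EINVAL), so no separate check needed. */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		*base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(*base))
			return PTR_ERR(*base);
		return 0;
	}
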
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 716b23e..6aec3ad 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/slab.h>
@@ -73,6 +74,11 @@
 	int				mode;
 };
 
+struct sirfsoc_dma_regs {
+	u32				ctrl[SIRFSOC_DMA_CHANNELS];
+	u32				interrupt_en;
+};
+
 struct sirfsoc_dma {
 	struct dma_device		dma;
 	struct tasklet_struct		tasklet;
@@ -81,10 +87,13 @@
 	int				irq;
 	struct clk			*clk;
 	bool				is_marco;
+	struct sirfsoc_dma_regs		regs_save;
 };
 
 #define DRV_NAME	"sirfsoc_dma"
 
+static int sirfsoc_dma_runtime_suspend(struct device *dev);
+
 /* Convert struct dma_chan to struct sirfsoc_dma_chan */
 static inline
 struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
@@ -393,6 +402,8 @@
 	LIST_HEAD(descs);
 	int i;
 
+	pm_runtime_get_sync(sdma->dma.dev);
+
 	/* Alloc descriptors for this channel */
 	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
 		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
@@ -425,6 +436,7 @@
 static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
 	struct sirfsoc_dma_desc *sdesc, *tmp;
 	unsigned long flags;
 	LIST_HEAD(descs);
@@ -445,6 +457,8 @@
 	/* Free descriptors */
 	list_for_each_entry_safe(sdesc, tmp, &descs, node)
 		kfree(sdesc);
+
+	pm_runtime_put(sdma->dma.dev);
 }
 
 /* Send pending descriptor to hardware */
@@ -595,7 +609,7 @@
 	spin_unlock_irqrestore(&schan->lock, iflags);
 
 	if (!sdesc)
-		return 0;
+		return NULL;
 
 	/* Place descriptor in prepared list */
 	spin_lock_irqsave(&schan->lock, iflags);
@@ -723,14 +737,14 @@
 
 	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
 
-	clk_prepare_enable(sdma->clk);
-
 	/* Register DMA engine */
 	dev_set_drvdata(dev, sdma);
+
 	ret = dma_async_device_register(dma);
 	if (ret)
 		goto free_irq;
 
+	pm_runtime_enable(&op->dev);
 	dev_info(dev, "initialized SIRFSOC DMAC driver\n");
 
 	return 0;
@@ -747,13 +761,124 @@
 	struct device *dev = &op->dev;
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 
-	clk_disable_unprepare(sdma->clk);
 	dma_async_device_unregister(&sdma->dma);
 	free_irq(sdma->irq, sdma);
 	irq_dispose_mapping(sdma->irq);
+	pm_runtime_disable(&op->dev);
+	if (!pm_runtime_status_suspended(&op->dev))
+		sirfsoc_dma_runtime_suspend(&op->dev);
+
 	return 0;
 }
 
+static int sirfsoc_dma_runtime_suspend(struct device *dev)
+{
+	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(sdma->clk);
+	return 0;
+}
+
+static int sirfsoc_dma_runtime_resume(struct device *dev)
+{
+	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(sdma->clk);
+	if (ret < 0) {
+		dev_err(dev, "clk_enable failed: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int sirfsoc_dma_pm_suspend(struct device *dev)
+{
+	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+	struct sirfsoc_dma_regs *save = &sdma->regs_save;
+	struct sirfsoc_dma_desc *sdesc;
+	struct sirfsoc_dma_chan *schan;
+	int ch;
+	int ret;
+
+	/*
+	 * if we were runtime-suspended before, resume to enable the clock
+	 * before accessing registers
+	 */
+	if (pm_runtime_status_suspended(dev)) {
+		ret = sirfsoc_dma_runtime_resume(dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * the DMA controller loses all register contents while suspended,
+	 * so we need to save the registers of active channels
+	 */
+	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+		schan = &sdma->channels[ch];
+		if (list_empty(&schan->active))
+			continue;
+		sdesc = list_first_entry(&schan->active,
+			struct sirfsoc_dma_desc,
+			node);
+		save->ctrl[ch] = readl_relaxed(sdma->base +
+			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+	}
+	save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
+
+	/* Disable clock */
+	sirfsoc_dma_runtime_suspend(dev);
+
+	return 0;
+}
+
+static int sirfsoc_dma_pm_resume(struct device *dev)
+{
+	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+	struct sirfsoc_dma_regs *save = &sdma->regs_save;
+	struct sirfsoc_dma_desc *sdesc;
+	struct sirfsoc_dma_chan *schan;
+	int ch;
+	int ret;
+
+	/* Enable clock before accessing register */
+	ret = sirfsoc_dma_runtime_resume(dev);
+	if (ret < 0)
+		return ret;
+
+	writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
+	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+		schan = &sdma->channels[ch];
+		if (list_empty(&schan->active))
+			continue;
+		sdesc = list_first_entry(&schan->active,
+			struct sirfsoc_dma_desc,
+			node);
+		writel_relaxed(sdesc->width,
+			sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
+		writel_relaxed(sdesc->xlen,
+			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
+		writel_relaxed(sdesc->ylen,
+			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
+		writel_relaxed(save->ctrl[ch],
+			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+		writel_relaxed(sdesc->addr >> 2,
+			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
+	}
+
+	/* if we were runtime-suspended before, suspend again */
+	if (pm_runtime_status_suspended(dev))
+		sirfsoc_dma_runtime_suspend(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
+	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
+};
+
 static struct of_device_id sirfsoc_dma_match[] = {
 	{ .compatible = "sirf,prima2-dmac", },
 	{ .compatible = "sirf,marco-dmac", },
@@ -766,6 +891,7 @@
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
+		.pm = &sirfsoc_dma_pm_ops,
 		.of_match_table	= sirfsoc_dma_match,
 	},
 };
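
The new runtime-PM wiring pairs a pm_runtime_get_sync() in alloc_chan_resources with a pm_runtime_put() in free_chan_resources, so the DMAC clock can be gated whenever no channel is in use. A hedged sketch of that reference pairing (function names hypothetical):

	static int example_channel_get(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);	/* powers the block up */

		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* balance the refcount */
			return ret;
		}
		return 0;
	}

	static void example_channel_put(struct device *dev)
	{
		pm_runtime_put(dev);	/* may gate the clock again */
	}
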
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5ab5880..82d2b97 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2591,6 +2591,9 @@
 	int i;
 
 	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
+	if (!sg)
+		return NULL;
+
 	for (i = 0; i < periods; i++) {
 		sg_dma_address(&sg[i]) = dma_addr;
 		sg_dma_len(&sg[i]) = period_len;
@@ -3139,7 +3142,7 @@
 
 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 {
-	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
 	struct clk *clk = NULL;
 	void __iomem *virtbase = NULL;
 	struct resource *res = NULL;
@@ -3226,8 +3229,8 @@
 	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
 
 	dev_info(&pdev->dev,
-		 "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
-		 rev, res->start, num_phy_chans, num_log_chans);
+		 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
+		 rev, &res->start, num_phy_chans, num_log_chans);
 
 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
 		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
@@ -3485,7 +3488,7 @@
 {
 	struct stedma40_platform_data *pdata;
 	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
-	const const __be32 *list;
+	const __be32 *list;
 
 	pdata = devm_kzalloc(&pdev->dev,
 			     sizeof(struct stedma40_platform_data),
@@ -3516,7 +3519,7 @@
 	list = of_get_property(np, "disabled-channels", &num_disabled);
 	num_disabled /= sizeof(*list);
 
-	if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+	if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
 		d40_err(&pdev->dev,
 			"Invalid number of disabled channels specified (%d)\n",
 			num_disabled);
@@ -3535,7 +3538,7 @@
 
 static int __init d40_probe(struct platform_device *pdev)
 {
-	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	int ret = -ENOENT;
 	struct d40_base *base = NULL;
@@ -3579,9 +3582,7 @@
 	if (request_mem_region(res->start, resource_size(res),
 			       D40_NAME " I/O lcpa") == NULL) {
 		ret = -EBUSY;
-		d40_err(&pdev->dev,
-			"Failed to request LCPA region 0x%x-0x%x\n",
-			res->start, res->end);
+		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
 		goto failure;
 	}
 
@@ -3589,8 +3590,8 @@
 	val = readl(base->virtbase + D40_DREG_LCPA);
 	if (res->start != val && val != 0) {
 		dev_warn(&pdev->dev,
-			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
-			 __func__, val, res->start);
+			 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
+			 __func__, val, &res->start);
 	} else
 		writel(res->start, base->virtbase + D40_DREG_LCPA);
 
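
The 0x%x -> %pa conversions matter on configurations where resource_size_t is wider than int; %pa prints the value at its native width and, unusually, takes a pointer to it. A short sketch of the calling convention (function name hypothetical):

	static void example_report(struct device *dev, struct resource *res)
	{
		/* %pa takes the *address* of a resource_size_t/phys_addr_t */
		dev_info(dev, "LCPA region %pa-%pa\n", &res->start, &res->end);
	}
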
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f137914..5d4986e 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -767,13 +767,11 @@
 	unsigned long flags;
 	unsigned int residual;
 
-	spin_lock_irqsave(&tdc->lock, flags);
-
 	ret = dma_cookie_status(dc, cookie, txstate);
-	if (ret == DMA_SUCCESS) {
-		spin_unlock_irqrestore(&tdc->lock, flags);
+	if (ret == DMA_SUCCESS)
 		return ret;
-	}
+
+	spin_lock_irqsave(&tdc->lock, flags);
 
 	/* Check on wait_ack desc status */
 	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
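
The tegra change hoists dma_cookie_status() out of the channel lock: a cookie already marked complete needs no residue computation, so the fast path can return without locking. A sketch of the shape such a tx_status callback takes (name hypothetical):

	static enum dma_status example_tx_status(struct dma_chan *c,
						 dma_cookie_t cookie,
						 struct dma_tx_state *txstate)
	{
		enum dma_status ret = dma_cookie_status(c, cookie, txstate);

		if (ret == DMA_SUCCESS)
			return ret;	/* complete - nothing left to report */

		/* slow path: take the lock and fill in the residue */
		return ret;
	}
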
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0ef43c1..28af214 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -669,7 +669,7 @@
 
 static int td_probe(struct platform_device *pdev)
 {
-	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct timb_dma *td;
 	struct resource *iomem;
 	int irq;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index a59fb48..71e8e77 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,15 +962,14 @@
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS) {
-		spin_lock_bh(&dc->lock);
-		txx9dmac_scan_descriptors(dc);
-		spin_unlock_bh(&dc->lock);
+	if (ret == DMA_SUCCESS)
+		return DMA_SUCCESS;
 
-		ret = dma_cookie_status(chan, cookie, txstate);
-	}
+	spin_lock_bh(&dc->lock);
+	txx9dmac_scan_descriptors(dc);
+	spin_unlock_bh(&dc->lock);
 
-	return ret;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
@@ -1118,9 +1117,10 @@
 
 static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 {
-	struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+	struct txx9dmac_chan_platform_data *cpdata =
+			dev_get_platdata(&pdev->dev);
 	struct platform_device *dmac_dev = cpdata->dmac_dev;
-	struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+	struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
 	struct txx9dmac_chan *dc;
 	int err;
 	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
@@ -1203,7 +1203,7 @@
 
 static int __init txx9dmac_probe(struct platform_device *pdev)
 {
-	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct resource *io;
 	struct txx9dmac_dev *ddev;
 	u32 mcr;
@@ -1282,7 +1282,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
-	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	u32 mcr;
 
 	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 232fa8f..fa0affb 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -14,7 +14,7 @@
  * of and an antecedent to, SMBIOS, which stands for System
  * Management BIOS.  See further: http://www.dmtf.org/standards
  */
-static char dmi_empty_string[] = "        ";
+static const char dmi_empty_string[] = "        ";
 
 static u16 __initdata dmi_ver;
 /*
@@ -49,7 +49,7 @@
 	return "";
 }
 
-static char * __init dmi_string(const struct dmi_header *dm, u8 s)
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
 {
 	const char *bp = dmi_string_nosave(dm, s);
 	char *str;
@@ -62,8 +62,6 @@
 	str = dmi_alloc(len);
 	if (str != NULL)
 		strcpy(str, bp);
-	else
-		printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
 
 	return str;
 }
@@ -133,17 +131,18 @@
 	return sum == 0;
 }
 
-static char *dmi_ident[DMI_STRING_MAX];
+static const char *dmi_ident[DMI_STRING_MAX];
 static LIST_HEAD(dmi_devices);
 int dmi_available;
 
 /*
  *	Save a DMI string
  */
-static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int string)
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+		int string)
 {
-	const char *d = (const char*) dm;
-	char *p;
+	const char *d = (const char *) dm;
+	const char *p;
 
 	if (dmi_ident[slot])
 		return;
@@ -155,9 +154,10 @@
 	dmi_ident[slot] = p;
 }
 
-static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+		int index)
 {
-	const u8 *d = (u8*) dm + index;
+	const u8 *d = (u8 *) dm + index;
 	char *s;
 	int is_ff = 1, is_00 = 1, i;
 
@@ -188,12 +188,13 @@
 	else
 		sprintf(s, "%pUB", d);
 
-        dmi_ident[slot] = s;
+	dmi_ident[slot] = s;
 }
 
-static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+		int index)
 {
-	const u8 *d = (u8*) dm + index;
+	const u8 *d = (u8 *) dm + index;
 	char *s;
 
 	if (dmi_ident[slot])
@@ -216,10 +217,8 @@
 		return;
 
 	dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
-	if (!dev) {
-		printk(KERN_ERR "dmi_save_one_device: out of memory.\n");
+	if (!dev)
 		return;
-	}
 
 	dev->type = type;
 	strcpy((char *)(dev + 1), name);
@@ -249,17 +248,14 @@
 	struct dmi_device *dev;
 
 	for (i = 1; i <= count; i++) {
-		char *devname = dmi_string(dm, i);
+		const char *devname = dmi_string(dm, i);
 
 		if (devname == dmi_empty_string)
 			continue;
 
 		dev = dmi_alloc(sizeof(*dev));
-		if (!dev) {
-			printk(KERN_ERR
-			   "dmi_save_oem_strings_devices: out of memory.\n");
+		if (!dev)
 			break;
-		}
 
 		dev->type = DMI_DEV_TYPE_OEM_STRING;
 		dev->name = devname;
@@ -272,21 +268,17 @@
 static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
 {
 	struct dmi_device *dev;
-	void * data;
+	void *data;
 
 	data = dmi_alloc(dm->length);
-	if (data == NULL) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+	if (data == NULL)
 		return;
-	}
 
 	memcpy(data, dm, dm->length);
 
 	dev = dmi_alloc(sizeof(*dev));
-	if (!dev) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+	if (!dev)
 		return;
-	}
 
 	dev->type = DMI_DEV_TYPE_IPMI;
 	dev->name = "IPMI controller";
@@ -301,10 +293,9 @@
 	struct dmi_dev_onboard *onboard_dev;
 
 	onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1);
-	if (!onboard_dev) {
-		printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n");
+	if (!onboard_dev)
 		return;
-	}
+
 	onboard_dev->instance = instance;
 	onboard_dev->segment = segment;
 	onboard_dev->bus = bus;
@@ -320,7 +311,7 @@
 
 static void __init dmi_save_extended_devices(const struct dmi_header *dm)
 {
-	const u8 *d = (u8*) dm + 5;
+	const u8 *d = (u8 *) dm + 5;
 
 	/* Skip disabled device */
 	if ((*d & 0x80) == 0)
@@ -338,7 +329,7 @@
  */
 static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
 {
-	switch(dm->type) {
+	switch (dm->type) {
 	case 0:		/* BIOS Information */
 		dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
 		dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
@@ -502,13 +493,7 @@
 			dmi_available = 1;
 			goto out;
 		}
-	}
-	else {
-		/*
-		 * no iounmap() for that ioremap(); it would be a no-op, but
-		 * it's so early in setup that sucker gets confused into doing
-		 * what it shouldn't if we actually call it.
-		 */
+	} else {
 		p = dmi_ioremap(0xF0000, 0x10000);
 		if (p == NULL)
 			goto error;
@@ -533,7 +518,7 @@
 		dmi_iounmap(p, 0x10000);
 	}
  error:
-	printk(KERN_INFO "DMI not present or invalid.\n");
+	pr_info("DMI not present or invalid.\n");
  out:
 	dmi_initialized = 1;
 }
@@ -669,7 +654,7 @@
 
 /**
  *	dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
- *	@str: 	Case sensitive Name
+ *	@str: Case sensitive Name
  */
 int dmi_name_in_vendors(const char *str)
 {
@@ -696,13 +681,13 @@
  *	A new search is initiated by passing %NULL as the @from argument.
  *	If @from is not %NULL, searches continue from next device.
  */
-const struct dmi_device * dmi_find_device(int type, const char *name,
+const struct dmi_device *dmi_find_device(int type, const char *name,
 				    const struct dmi_device *from)
 {
 	const struct list_head *head = from ? &from->list : &dmi_devices;
 	struct list_head *d;
 
-	for(d = head->next; d != &dmi_devices; d = d->next) {
+	for (d = head->next; d != &dmi_devices; d = d->next) {
 		const struct dmi_device *dev =
 			list_entry(d, struct dmi_device, list);
 
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index acba0b9..6eb535f 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -525,7 +525,7 @@
 		u32 data_type;
 	} param;
 
-	rc = strict_strtoul(buf, 0, &val);
+	rc = kstrtoul(buf, 0, &val);
 	if (rc)
 		return rc;
 
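
strict_strtoul() was long deprecated in favor of kstrtoul(), which has the same contract: 0 on success, a negative errno otherwise, with trailing garbage rejected (a final newline excepted). A sketch of the sysfs-store idiom it enables (function name hypothetical, attribute body elided):

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned long val;
		int rc = kstrtoul(buf, 0, &val);	/* base 0: auto-detect */

		if (rc)
			return rc;

		/* ... apply val ... */
		return count;
	}
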
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ba9876f..0dfaf20 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -195,8 +195,8 @@
 		return;
 
 	for (;; index++) {
-		ret = of_parse_phandle_with_args(np, "gpio-ranges",
-				"#gpio-range-cells", index, &pinspec);
+		ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+				index, &pinspec);
 		if (ret)
 			break;
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab924..a6f4cb5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		u32 rpstat, cagf;
+		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
@@ -869,6 +869,14 @@
 
 		gen6_gt_force_wake_get(dev_priv);
 
+		reqf = I915_READ(GEN6_RPNSWREQ);
+		reqf &= ~GEN6_TURBO_DISABLE;
+		if (IS_HASWELL(dev))
+			reqf >>= 24;
+		else
+			reqf >>= 25;
+		reqf *= GT_FREQUENCY_MULTIPLIER;
+
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa091..9b265a4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret && ret != -ENODEV)
-		goto out;
+	if (!HAS_PCH_SPLIT(dev)) {
+		ret = vga_client_register(dev->pdev, dev, NULL,
+					  i915_vga_set_decode);
+		if (ret && ret != -ENODEV)
+			goto out;
+	}
 
 	intel_register_dsm_handler();
 
@@ -1348,6 +1351,12 @@
 	 */
 	intel_fbdev_initial_config(dev);
 
+	/*
+	 * Must do this after fbcon init so that
+	 * vgacon_save_screen() works during the handover.
+	 */
+	i915_disable_vga_mem(dev);
+
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	dev_priv->enable_hotplug_processing = true;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ea..69d8ed5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
-#define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = id,				\
-	.subvendor = PCI_ANY_ID,		\
-	.subdevice = PCI_ANY_ID,		\
-	.driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = 0x16a,			\
-	.subvendor = 0x152d,			\
-	.subdevice = 0x8990,			\
-	.driver_data = (unsigned long) info }
-
-
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@
 	.has_vebox_ring = 1,
 };
 
+/*
+ * Make sure the device matches here go from most specific to most
+ * general.  For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+	INTEL_I830_IDS(&intel_i830_info),	\
+	INTEL_I845G_IDS(&intel_845g_info),	\
+	INTEL_I85X_IDS(&intel_i85x_info),	\
+	INTEL_I865G_IDS(&intel_i865g_info),	\
+	INTEL_I915G_IDS(&intel_i915g_info),	\
+	INTEL_I915GM_IDS(&intel_i915gm_info),	\
+	INTEL_I945G_IDS(&intel_i945g_info),	\
+	INTEL_I945GM_IDS(&intel_i945gm_info),	\
+	INTEL_I965G_IDS(&intel_i965g_info),	\
+	INTEL_G33_IDS(&intel_g33_info),		\
+	INTEL_I965GM_IDS(&intel_i965gm_info),	\
+	INTEL_GM45_IDS(&intel_gm45_info), 	\
+	INTEL_G45_IDS(&intel_g45_info), 	\
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
+	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
 static const struct pci_device_id pciidlist[] = {		/* aka */
-	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
-	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
-	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
-	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
-	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
-	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
-	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
-	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
-	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
-	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
-	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
-	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
-	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
-	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
-	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
-	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
-	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
-	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
-	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
-	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
-	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
-	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
-	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
-	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
-	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
-	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
-	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
-	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
-	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
-	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
-	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
-	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
-	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
-	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT3 mobile */
-	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
-	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
-	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
-	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
-	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
-	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
-	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
-	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
-	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
-	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
-	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
-	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
-	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
-	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
-	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
-	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
-	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
-	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
-	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
-	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
-	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
-	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
-	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
-	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
-	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+	INTEL_PCI_IDS,
 	{0, 0, 0}
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785..35874b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
+	/**
+	 * wq - Driver workqueue for GEM.
+	 *
+	 * NOTE: Work items scheduled here are not allowed to grab any modeset
+	 * locks, because otherwise the flushing done in the pageflip code will
+	 * result in deadlocks.
+	 */
 	struct workqueue_struct *wq;
 
 	/* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10..d9e337f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
+	struct list_head still_bound_list;
 	struct drm_i915_gem_object *obj, *next;
 	long count = 0;
 
@@ -1709,23 +1710,55 @@
 		}
 	}
 
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-				 global_list) {
+	/*
+	 * As we may completely rewrite the bound list whilst unbinding
+	 * (due to retiring requests) we have to strictly process only
+	 * one element of the list at the time, and recheck the list
+	 * on every iteration.
+	 */
+	INIT_LIST_HEAD(&still_bound_list);
+	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 		struct i915_vma *vma, *v;
 
+		obj = list_first_entry(&dev_priv->mm.bound_list,
+				       typeof(*obj), global_list);
+		list_move_tail(&obj->global_list, &still_bound_list);
+
 		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 			continue;
 
+		/*
+		 * Hold a reference whilst we unbind this object, as we may
+		 * end up waiting for and retiring requests. This might
+		 * release the final reference (held by the active list)
+		 * and result in the object being freed from under us.
+		 *
+		 * Note 1: Shrinking the bound list is special since only active
+		 * (and hence bound) objects can contain such limbo objects, so
+		 * we don't need special tricks for shrinking the unbound list.
+		 * The only other place where we have to be careful with active
+		 * objects suddenly disappearing due to retiring requests is the
+		 * eviction code.
+		 *
+		 * Note 2: Even though the bound list doesn't hold a reference
+		 * to the object we can safely grab one here: The final object
+		 * unreferencing and the bound_list are both protected by the
+		 * dev->struct_mutex and so we won't ever be able to observe an
+		 * object on the bound_list with a reference count of 0.
+		 */
+		drm_gem_object_reference(&obj->base);
+
 		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
 			if (i915_vma_unbind(vma))
 				break;
 
-		if (!i915_gem_object_put_pages(obj)) {
+		if (i915_gem_object_put_pages(obj) == 0)
 			count += obj->base.size >> PAGE_SHIFT;
-			if (count >= target)
-				return count;
-		}
+
+		drm_gem_object_unreference(&obj->base);
 	}
+	list_splice(&still_bound_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
@@ -1774,7 +1807,6 @@
 
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
-		sg_free_table(st);
 		kfree(st);
 		return -ENOMEM;
 	}
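
The shrinker rework in the hunks above hinges on one idiom: i915_vma_unbind() may retire requests and rewrite mm.bound_list underneath the iterator, so list_for_each_entry_safe() is no longer safe here. A minimal sketch of the detach/park/splice pattern, with a hypothetical element type standing in for the GEM object:

    #include <linux/list.h>

    struct item {
    	struct list_head link;			/* hypothetical element */
    };

    static void walk_unstable_list(struct list_head *src)
    {
    	LIST_HEAD(done);			/* private parking list */

    	while (!list_empty(src)) {
    		struct item *it = list_first_entry(src, struct item, link);

    		/* Park the entry first: the destructive work below may
    		 * rewrite src, but src can never hand us this entry twice. */
    		list_move_tail(&it->link, &done);

    		/* ... grab a reference, unbind, drop the reference ... */
    	}

    	list_splice(&done, src);		/* put the survivors back */
    }
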
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05..7d5752f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@
 
 	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	ret = i915_gem_object_get_pages(obj);
-	if (ret) {
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_unlock;
+
+	i915_gem_object_pin_pages(obj);
 
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_unpin;
 	}
 
 	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
-	if (ret) {
-		kfree(st);
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_free;
 
 	src = obj->pages->sgl;
 	dst = st->sgl;
@@ -73,17 +70,23 @@
 	}
 
 	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-		sg_free_table(st);
-		kfree(st);
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_free_sg;
 	}
 
-	i915_gem_object_pin_pages(obj);
-
-out:
 	mutex_unlock(&obj->base.dev->struct_mutex);
 	return st;
+
+err_free_sg:
+	sg_free_table(st);
+err_free:
+	kfree(st);
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
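
The map_dma_buf rewrite above replaces ad-hoc inline cleanup with the kernel's standard error-unwind ladder, which is also what makes it safe to move the pin before the copy: every later failure now unwinds it. A minimal sketch of the idiom, with hypothetical resources and a hypothetical final step:

    #include <linux/slab.h>

    static int use_pair(void *a, void *b)	/* hypothetical final step */
    {
    	return 0;
    }

    static int setup_pair(void **pa, void **pb)
    {
    	void *a, *b;
    	int ret;

    	a = kmalloc(64, GFP_KERNEL);
    	if (!a)
    		return -ENOMEM;

    	b = kmalloc(64, GFP_KERNEL);
    	if (!b) {
    		ret = -ENOMEM;
    		goto err_free_a;
    	}

    	ret = use_pair(a, b);
    	if (ret)
    		goto err_free_b;

    	*pa = a;
    	*pb = b;
    	return 0;

    /* Labels release in reverse order of acquisition. */
    err_free_b:
    	kfree(b);
    err_free_a:
    	kfree(a);
    	return ret;
    }
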
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a..bf34577 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@
 	else
 		ret = relocate_entry_gtt(obj, reloc);
 
+	if (ret)
+		return ret;
+
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b..e15a1d9 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int bios_reserved = 0;
 
+	if (dev_priv->gtt.stolen_size == 0)
+		return 0;
+
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568..aba9d74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@
 		if (WARN_ON(ring->id != RCS))
 			return NULL;
 
-		obj = ring->private;
+		obj = ring->scratch.obj;
 		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
 		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 			return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445..83cce0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock(&dev_priv->irq_lock);
 
-	queue_work(dev_priv->wq,
-		   &dev_priv->hotplug_work);
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+	 * queue, because otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@
 			wake_up_all(&ring->irq_queue);
 	}
 
-	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+	/*
+	 * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pageflips). Hence it must not be run on our own
+	 * dev_priv->wq work queue, because otherwise the flush_work in the
+	 * pageflip code will deadlock.
+	 */
+	schedule_work(&dev_priv->gpu_error.work);
 }
 
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@
 
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->hangcheck.score > FIRE) {
-			DRM_ERROR("%s on %s\n",
-				  stuck[i] ? "stuck" : "no progress",
-				  ring->name);
+			DRM_INFO("%s on %s\n",
+				 stuck[i] ? "stuck" : "no progress",
+				 ring->name);
 			rings_hung++;
 		}
 	}
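
Both schedule_work() conversions above break the same cycle: the pageflip path flushes dev_priv->wq while holding modeset locks, so any work item that itself takes modeset locks must live on the system workqueue instead. The shape of the deadlock being avoided, as a sketch with illustrative names:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(modeset_lock);	/* stand-in for the modeset locks */
    static struct work_struct hotplug_work;
    static struct workqueue_struct *driver_wq;	/* stand-in for dev_priv->wq */

    static void hotplug_fn(struct work_struct *work)
    {
    	mutex_lock(&modeset_lock);	/* the work needs the lock ...       */
    	/* ... call down into the fb helpers, reprobe outputs ...           */
    	mutex_unlock(&modeset_lock);
    }

    static void init_once(void)
    {
    	INIT_WORK(&hotplug_work, hotplug_fn);
    }

    static void flip_path(void)
    {
    	mutex_lock(&modeset_lock);
    	flush_workqueue(driver_wq);	/* would wait forever if hotplug_fn
    					 * were queued on driver_wq         */
    	mutex_unlock(&modeset_lock);
    }

    static void irq_path(void)
    {
    	schedule_work(&hotplug_work);	/* system wq: no cycle with the
    					 * flush above                      */
    }
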
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f7..c159e1a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a) ((a) << 16)
 
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga arbiter.
- */
-#define INTEL_GMCH_CTRL		0x52
-#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
-#define SNB_GMCH_CTRL		0x50
-#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
-#define    SNB_GMCH_GGMS_MASK	0x3
-#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
-#define    SNB_GMCH_GMS_MASK    0x1f
-
-
 /* PCI config space */
 
 #define HPLLCC	0xc0 /* 855 only */
@@ -245,6 +230,7 @@
 *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
 #define   MI_INVALIDATE_TLB		(1<<18)
@@ -693,6 +679,23 @@
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
 #define DERRMR		0x44050
+#define   DERRMR_PIPEA_SCANLINE		(1<<0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
+#define   DERRMR_PIPEA_VBLANK		(1<<3)
+#define   DERRMR_PIPEA_HBLANK		(1<<5)
+#define   DERRMR_PIPEB_SCANLINE 	(1<<8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE	(1<<9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE	(1<<10)
+#define   DERRMR_PIPEB_VBLANK		(1<<11)
+#define   DERRMR_PIPEB_HBLANK		(1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define   DERRMR_PIPEC_SCANLINE		(1<<14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE	(1<<15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE	(1<<20)
+#define   DERRMR_PIPEC_VBLANK		(1<<21)
+#define   DERRMR_PIPEC_HBLANK		(1<<22)
+
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
@@ -3310,6 +3313,7 @@
 #define   MCURSOR_PIPE_A	0x00
 #define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define   CURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
 #define _CURABASE		(dev_priv->info->display_mmio_offset + 0x70084)
 #define _CURAPOS		(dev_priv->info->display_mmio_offset + 0x70088)
 #define   CURSOR_POS_MASK       0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f..c8c4112 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			vlv_gpu_freq(dev_priv->mem_freq,
+				     dev_priv->rps.rpe_delay));
+}
+
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
 
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
 
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@
 	NULL,
 };
 
+static const struct attribute *vlv_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_vlv_rpe_freq_mhz.attr,
+	NULL,
+};
+
 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr, char *buf,
 				loff_t off, size_t count)
@@ -492,11 +513,13 @@
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	ret = 0;
+	if (IS_VALLEYVIEW(dev))
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
-		if (ret)
-			DRM_ERROR("gen6 sysfs setup failed\n");
-	}
+	if (ret)
+		DRM_ERROR("RPS sysfs setup failed\n");
 
 	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
 				    &error_state_attr);
@@ -507,7 +530,10 @@
 void i915_teardown_sysfs(struct drm_device *dev)
 {
 	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
-	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+	if (IS_VALLEYVIEW(dev))
+		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else
+		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
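
The sysfs hunks above split the RPS attributes into per-platform arrays chosen once at registration time. For reference, a minimal sketch of the DEVICE_ATTR plus sysfs_create_files() pattern they rely on (the attribute name and demo_show() are illustrative):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct device *kdev,
    			 struct device_attribute *attr, char *buf)
    {
    	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
    }
    static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

    /* NULL-terminated, so create and remove can walk it blindly. */
    static const struct attribute *demo_attrs[] = {
    	&dev_attr_demo.attr,
    	NULL,
    };

    /* registration: ret = sysfs_create_files(&kdev->kobj, demo_attrs);
     * teardown:     sysfs_remove_files(&kdev->kobj, demo_attrs);      */
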
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875..ea9022e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 5) {
 		u32 adpa;
 
 		adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d8..2489d0b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@
 	else
 		dspcntr &= ~DISPPLANE_TILED;
 
-	/* must disable */
-	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+	else
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	I915_WRITE(reg, dspcntr);
 
@@ -6762,8 +6764,10 @@
 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 			cntl |= CURSOR_MODE_DISABLE;
 		}
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev)) {
 			cntl |= CURSOR_PIPE_CSC_ENABLE;
+			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+		}
 		I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
 		intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@
 		}
 	}
 
-	pipe_config->adjusted_mode.clock = clock.dot *
-		pipe_config->pixel_multiplier;
+	pipe_config->adjusted_mode.clock = clock.dot;
 }
 
 static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@
 	return ret;
 }
 
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrupts for page flip completion, which
- * means clients will hang after the first flip is queued.  Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
 static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	struct intel_ring_buffer *ring;
 	uint32_t plane_bit = 0;
-	int ret;
+	int len, ret;
+
+	ring = obj->ring;
+	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+		ring = &dev_priv->ring[BCS];
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
@@ -7866,10 +7867,34 @@
 		goto err_unpin;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	len = 4;
+	if (ring->id == RCS)
+		len += 6;
+
+	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;
 
+	/* Unmask the flip-done completion message. Note that the bspec says that
+	 * we should do this for both the BCS and RCS, and that we must not unmask
+	 * more than one flip event at any time (or ensure that one flip message
+	 * can be sent by waiting for flip-done prior to queueing new flips).
+	 * Experimentation says that BCS works despite DERRMR masking all
+	 * flip-done completion events and that unmasking all planes at once
+	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
+	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
+	 */
+	if (ring->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					DERRMR_PIPEB_PRI_FLIP_DONE |
+					DERRMR_PIPEC_PRI_FLIP_DONE));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+	}
+
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@
 	POSTING_READ(vga_reg);
 }
 
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+	/* Enable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_LEGACY_MEM |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+	/* Disable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
 void intel_modeset_init_hw(struct drm_device *dev)
 {
 	intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
+		i915_disable_vga_mem(dev);
 	}
 }
 
@@ -10513,6 +10566,8 @@
 
 	intel_disable_fbc(dev);
 
+	i915_enable_vga_mem(dev);
+
 	intel_disable_gt_powersave(dev);
 
 	ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1760808..a47799e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@
 			    struct drm_display_mode *fixed_mode);
 extern void intel_panel_fini(struct intel_panel *panel);
 
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@
 extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278..831a5c0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_display_mode *fixed_mode =
-		lvds_encoder->attached_connector->base.panel.fixed_mode;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc->config.adjusted_mode;
 	int pipe = crtc->pipe;
 	u32 temp;
 
@@ -183,9 +183,9 @@
 			temp &= ~LVDS_ENABLE_DITHER;
 	}
 	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-	if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		temp |= LVDS_HSYNC_POLARITY;
-	if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		temp |= LVDS_VSYNC_POLARITY;
 
 	I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb6..119771f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@
 		return ASLE_BACKLIGHT_FAILED;
 
 	intel_panel_set_backlight(dev, bclp, 255);
-	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
 	return 0;
 }
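
The backlight fix above is more than a cleanup: the old expression truncates, so the lowest non-zero brightness levels were reported back to the firmware as 0%. A toy computation (plain user-space C) showing the difference:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int bclp = 1;	/* smallest non-zero raw value, 0..255 */

    	/* old: (1 * 0x64) / 0xff = 100 / 255 = 0 -- reads back as "off" */
    	printf("old: %u%%\n", (bclp * 0x64) / 0xff);

    	/* new: DIV_ROUND_UP(1 * 100, 255) = 1 -- never rounds to zero   */
    	printf("new: %u%%\n", DIV_ROUND_UP(bclp * 100, 255));
    	return 0;
    }
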
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33b..42114ec 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
 void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
 {
-	adjusted_mode->hdisplay = fixed_mode->hdisplay;
-	adjusted_mode->hsync_start = fixed_mode->hsync_start;
-	adjusted_mode->hsync_end = fixed_mode->hsync_end;
-	adjusted_mode->htotal = fixed_mode->htotal;
+	drm_mode_copy(adjusted_mode, fixed_mode);
 
-	adjusted_mode->vdisplay = fixed_mode->vdisplay;
-	adjusted_mode->vsync_start = fixed_mode->vsync_start;
-	adjusted_mode->vsync_end = fixed_mode->vsync_end;
-	adjusted_mode->vtotal = fixed_mode->vtotal;
-
-	adjusted_mode->clock = fixed_mode->clock;
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */
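
The old open-coded copy above transferred only the raw timing fields, so anything else in the fixed mode, notably ->flags, never reached adjusted_mode; the LVDS hunk earlier in this series, which now reads the sync polarities from adjusted_mode, depends on the copy being complete. A sketch of what the replacement pair provides:

    #include <drm/drm_crtc.h>

    /* drm_mode_copy() copies the whole mode while preserving only the
     * destination's list head and base.id; drm_mode_set_crtcinfo() then
     * derives the crtc_* timing fields the modeset code consumes. */
    static void fill_adjusted(const struct drm_display_mode *fixed,
    			  struct drm_display_mode *adjusted)
    {
    	drm_mode_copy(adjusted, fixed);		/* includes ->flags   */
    	drm_mode_set_crtcinfo(adjusted, 0);	/* crtc_hdisplay, ... */
    }
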
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4605682..0c115cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 enabled_intrs;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
 	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->irq_lock);
+
 	/* only unmask PM interrupts we need. Mask all others. */
-	I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+	enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+	/* IVB and SNB hard hang on a looping batchbuffer
+	 * if GEN6_PM_UP_EI_EXPIRED is masked.
+	 */
+	if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+	I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
 static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	g4x_disable_trickle_feed(dev);
-
 	/* WaVSRefCountFullforceMissDisable:hsw */
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
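
GEN6_PMINTRMSK holds the complement of the interrupts to be delivered, which is easy to misread; the hunk above therefore builds the set of wanted interrupts first and writes the inverse once. A toy model of that calculation (user-space C; the bit values are illustrative, not the real register layout):

    #include <stdio.h>
    #include <stdint.h>

    #define PM_RPS_EVENTS		(0x7u << 1)	/* illustrative bits */
    #define PM_UP_EI_EXPIRED	(1u << 11)	/* illustrative bits */

    int main(void)
    {
    	uint32_t enabled = PM_RPS_EVENTS;
    	int gen = 7, is_haswell = 0;

    	/* pre-HSW gen6/7 must also keep UP_EI_EXPIRED unmasked, per the
    	 * hang described in the comment above */
    	if (gen <= 7 && !is_haswell)
    		enabled |= PM_UP_EI_EXPIRED;

    	printf("PMINTRMSK = 0x%08x\n", ~enabled);	/* the complement */
    	return 0;
    }
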
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05ccea..460ee10 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 
@@ -213,8 +202,7 @@
                          u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/*
@@ -481,68 +468,43 @@
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc;
-	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->private)
+	if (ring->scratch.obj)
 		return 0;
 
-	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-	if (!pc)
-		return -ENOMEM;
-
-	obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (obj == NULL) {
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
-	if (pc->cpu_page == NULL) {
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, pc->gtt_offset);
-
-	pc->obj = obj;
-	ring->private = pc;
+			 ring->name, ring->scratch.gtt_offset);
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin(ring->scratch.obj);
 err_unref:
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
-	kfree(pc);
 	return ret;
 }
 
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
-
-	obj = pc->obj;
-
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-}
-
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@
 {
 	struct drm_device *dev = ring->dev;
 
-	if (!ring->private)
+	if (ring->scratch.obj == NULL)
 		return;
 
-	if (HAS_BROKEN_CS_TLB(dev))
-		drm_gem_object_unreference(to_gem_object(ring->private));
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_unpin(ring->scratch.obj);
+	}
 
-	if (INTEL_INFO(dev)->gen >= 5)
-		cleanup_pipe_control(ring);
-
-	ring->private = NULL;
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
 }
 
 static void
@@ -742,8 +704,7 @@
 static int
 pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -814,15 +775,13 @@
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct pipe_control *pc = ring->private;
-	return pc->cpu_page[0];
+	return ring->scratch.cpu_page[0];
 }
 
 static void
 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct pipe_control *pc = ring->private;
-	pc->cpu_page[0] = seqno;
+	ring->scratch.cpu_page[0] = seqno;
 }
 
 static bool
@@ -1141,8 +1100,7 @@
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 	} else {
-		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+		u32 cs_offset = ring->scratch.gtt_offset;
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1835,7 +1793,8 @@
 			return ret;
 		}
 
-		ring->private = obj;
+		ring->scratch.obj = obj;
+		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	}
 
 	return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad53..68b1ca974 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@
 
 	struct intel_ring_hangcheck hangcheck;
 
-	void *private;
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
 };
 
 static inline bool
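
Folding the void *private cookie into an embedded scratch struct removes a kmalloc()/kfree() pair, the casts at every use site, and the type pun that made cleanup need the HAS_BROKEN_CS_TLB special case (gen2/3 kept a bare GEM object behind the pointer, gen5+ a pipe_control). The shape of the change, reduced to a declaration sketch:

    #include <linux/types.h>

    struct drm_i915_gem_object;		/* opaque here */

    struct ring_sketch {
    	/* before: void *private; -- a kmalloc'ed pipe_control on gen5+,
    	 * a bare GEM object pointer on gen2/3, cast at each use site */
    	struct {
    		struct drm_i915_gem_object *obj;
    		u32 gtt_offset;
    		volatile u32 *cpu_page;
    	} scratch;			/* after: one uniformly typed slot */
    };
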
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058..85037b9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = intel_encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
 	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+		&crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &crtc->config.requested_mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@
 	 * adjusted_mode.
 	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
 	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
 		DRM_INFO("Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
-	switch (intel_crtc->config.pixel_multiplier) {
+	switch (crtc->config.pixel_multiplier) {
 	default:
 		WARN(1, "unknown pixel mutlipler specified\n");
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@
 	}
 
 	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
-		sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
 	else
-		sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
-		sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+		sdvox |= (crtc->config.pixel_multiplier - 1)
 			<< SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621c..ad6ec4b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
-	/* must disable */
-	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+	else
+		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
 	sprctl |= SPRITE_ENABLE;
 
 	if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc86..8649f1c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@
 	}
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -272,6 +272,11 @@
 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 			__gen6_gt_force_wake_mt_reset(dev_priv);
 	}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	intel_uncore_forcewake_reset(dev);
 
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@
 	/* Spin waiting for the device to ack the reset request */
 	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
+	intel_uncore_forcewake_reset(dev);
+
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644..e893c53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 
 	nouveau_fbcon_zfill_all(drm_dev);
-	nouveau_display_resume(drm_dev);
+	if (drm_dev->mode_config.num_crtc)
+		nouveau_display_resume(drm_dev);
 	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
@@ -671,7 +672,8 @@
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 	nouveau_fbcon_zfill_all(drm_dev);
-	nouveau_display_resume(drm_dev);
+	if (drm_dev->mode_config.num_crtc)
+		nouveau_display_resume(drm_dev);
 	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
@@ -906,7 +908,8 @@
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev);
-	nouveau_display_resume(drm_dev);
+	if (drm_dev->mode_config.num_crtc)
+		nouveau_display_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
 	nv_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e..af02597 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@
 		if (!conflict->bridge_has_one_vga) {
 			vga_irq_set_state(conflict, false);
 			flags |= PCI_VGA_STATE_CHANGE_DECODES;
-			if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
 				pci_bits |= PCI_COMMAND_MEMORY;
-			if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
 				pci_bits |= PCI_COMMAND_IO;
 		}
 
@@ -267,11 +267,11 @@
 			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
 
 		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
-		conflict->owns &= ~lwants;
+		conflict->owns &= ~match;
 		/* If it also owned non-legacy, that is no longer the case */
-		if (lwants & VGA_RSRC_LEGACY_MEM)
+		if (match & VGA_RSRC_LEGACY_MEM)
 			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
-		if (lwants & VGA_RSRC_LEGACY_IO)
+		if (match & VGA_RSRC_LEGACY_IO)
 			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
 	}
 
@@ -644,10 +644,12 @@
 static inline void vga_update_device_decodes(struct vga_device *vgadev,
 					     int new_decodes)
 {
-	int old_decodes;
-	struct vga_device *new_vgadev, *conflict;
+	int old_decodes, decodes_removed, decodes_unlocked;
 
 	old_decodes = vgadev->decodes;
+	decodes_removed = ~new_decodes & old_decodes;
+	decodes_unlocked = vgadev->locks & decodes_removed;
+	vgadev->owns &= ~decodes_removed;
 	vgadev->decodes = new_decodes;
 
 	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns));
 
-
-	/* if we own the decodes we should move them along to
-	   another card */
-	if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
-		/* set us to own nothing */
-		vgadev->owns &= ~old_decodes;
-		list_for_each_entry(new_vgadev, &vga_list, list) {
-			if ((new_vgadev != vgadev) &&
-			    (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
-				pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
-				conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				if (!conflict)
-					__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				break;
-			}
-		}
+	/* if we removed locked decodes, zero their lock counts and release them */
+	if (decodes_unlocked) {
+		if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+			vgadev->io_lock_cnt = 0;
+		if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+			vgadev->mem_lock_cnt = 0;
+		__vga_put(vgadev, decodes_unlocked);
 	}
 
 	/* change decodes counter */
-	if (old_decodes != new_decodes) {
-		if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
-			vga_decode_count++;
-		else
-			vga_decode_count--;
-	}
+	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
+		vga_decode_count--;
+	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+	    new_decodes & VGA_RSRC_LEGACY_MASK)
+		vga_decode_count++;
 	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
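
The rewritten vga_update_device_decodes() turns the old ownership-transfer loop into plain bitmask bookkeeping. A toy run (user-space C, using the vgaarb legacy flag values) of how the removed and force-unlocked decodes are computed:

    #include <stdio.h>

    #define VGA_RSRC_LEGACY_IO	0x01
    #define VGA_RSRC_LEGACY_MEM	0x02

    int main(void)
    {
    	/* Illustrative state: the device decodes both legacy ranges and
    	 * holds a lock on legacy I/O. */
    	int decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
    	int locks   = VGA_RSRC_LEGACY_IO;

    	/* The new decode set drops I/O: */
    	int new_decodes      = VGA_RSRC_LEGACY_MEM;
    	int decodes_removed  = ~new_decodes & decodes;	/* what went away */
    	int decodes_unlocked = locks & decodes_removed;	/* locks to drop  */

    	printf("removed=0x%x unlocked=0x%x\n",
    	       decodes_removed, decodes_unlocked);	/* 0x1 and 0x1 */
    	return 0;
    }
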
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 89cfd64..ef91b8a 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -246,7 +246,7 @@
  */
 static u8 get_via_model_d_vrm(void)
 {
-	unsigned int vid, brand, dummy;
+	unsigned int vid, brand, __maybe_unused dummy;
 	static const char *brands[4] = {
 		"C7-M", "C7", "Eden", "C7-D"
 	};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 18c0623..70a39a8 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -233,8 +233,7 @@
 		return -ENOMEM;
 
 	if (dev_get_platdata(&client->dev)) {
-		pdata =
-		  (struct ina2xx_platform_data *)dev_get_platdata(&client->dev);
+		pdata = dev_get_platdata(&client->dev);
 		shunt = pdata->shunt_uohms;
 	} else if (!of_property_read_u32(client->dev.of_node,
 				"shunt-resistor", &val)) {
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 0a1c962..08ba497 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -282,7 +282,6 @@
 		clk_put(drv->pclk);
 		memset(drv, 0, sizeof(*drv));
 		kfree(drv);
-		platform_set_drvdata(pdev, NULL);
 	}
 	return 0;
 }
@@ -366,7 +365,6 @@
 	if (drv) {
 		memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
 		kfree(drv);
-		platform_set_drvdata(pdev, NULL);
 	}
 	return 0;
 }
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 0ba3766..bcd78a7 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1008,8 +1008,6 @@
 	struct resource *res;
 	struct omap_iommu *obj = platform_get_drvdata(pdev);
 
-	platform_set_drvdata(pdev, NULL);
-
 	iopgtable_clear_entry_all(obj);
 
 	irq = platform_get_irq(pdev, 0);
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 81e8cd4..c60b901 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -2,6 +2,7 @@
 
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2835.o
 obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
+obj-$(CONFIG_ARCH_MMP)			+= irq-mmp.o
 obj-$(CONFIG_ARCH_MVEBU)		+= irq-armada-370-xp.o
 obj-$(CONFIG_ARCH_MXS)			+= irq-mxs.o
 obj-$(CONFIG_ARCH_S3C24XX)		+= irq-s3c24xx.o
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ee7c503..d0e9480 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -453,6 +453,12 @@
 	writel_relaxed(1, base + GIC_CPU_CTRL);
 }
 
+void gic_cpu_if_down(void)
+{
+	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+	writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
+}
+
 #ifdef CONFIG_CPU_PM
 /*
  * Saves the GIC distributor registers during suspend or idle.  Must be called
diff --git a/arch/arm/mach-mmp/irq.c b/drivers/irqchip/irq-mmp.c
similarity index 62%
rename from arch/arm/mach-mmp/irq.c
rename to drivers/irqchip/irq-mmp.c
index 3c71246..2cb7cd0 100644
--- a/arch/arm/mach-mmp/irq.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -21,19 +21,20 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
-#include <mach/irqs.h>
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
 
-#ifdef CONFIG_CPU_MMP2
-#include <mach/pm-mmp2.h>
-#endif
-#ifdef CONFIG_CPU_PXA910
-#include <mach/pm-pxa910.h>
-#endif
-
-#include "common.h"
+#include "irqchip.h"
 
 #define MAX_ICU_NR		16
 
+#define PJ1_INT_SEL		0x10c
+#define PJ4_INT_SEL		0x104
+
+/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
+#define SEL_INT_PENDING		(1 << 6)
+#define SEL_INT_NUM_MASK	0x3f
+
 struct icu_chip_data {
 	int			nr_irqs;
 	unsigned int		virq_base;
@@ -54,7 +55,7 @@
 	unsigned int	conf_mask;
 };
 
-void __iomem *mmp_icu_base;
+static void __iomem *mmp_icu_base;
 static struct icu_chip_data icu_data[MAX_ICU_NR];
 static int max_icu_nr;
 
@@ -122,7 +123,7 @@
 	}
 }
 
-static struct irq_chip icu_irq_chip = {
+struct irq_chip icu_irq_chip = {
 	.name		= "icu_irq",
 	.irq_mask	= icu_mask_irq,
 	.irq_mask_ack	= icu_mask_ack_irq,
@@ -193,6 +194,32 @@
 	.conf_mask	= 0x7f,
 };
 
+static asmlinkage void __exception_irq_entry
+mmp_handle_irq(struct pt_regs *regs)
+{
+	int irq, hwirq;
+
+	hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
+	if (!(hwirq & SEL_INT_PENDING))
+		return;
+	hwirq &= SEL_INT_NUM_MASK;
+	irq = irq_find_mapping(icu_data[0].domain, hwirq);
+	handle_IRQ(irq, regs);
+}
+
+static asmlinkage void __exception_irq_entry
+mmp2_handle_irq(struct pt_regs *regs)
+{
+	int irq, hwirq;
+
+	hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
+	if (!(hwirq & SEL_INT_PENDING))
+		return;
+	hwirq &= SEL_INT_NUM_MASK;
+	irq = irq_find_mapping(icu_data[0].domain, hwirq);
+	handle_IRQ(irq, regs);
+}
+
 /* MMP (ARMv5) */
 void __init icu_init_irq(void)
 {
@@ -214,15 +241,13 @@
 		set_irq_flags(irq, IRQF_VALID);
 	}
 	irq_set_default_host(icu_data[0].domain);
-#ifdef CONFIG_CPU_PXA910
-	icu_irq_chip.irq_set_wake = pxa910_set_wake;
-#endif
+	set_handle_irq(mmp_handle_irq);
 }
 
 /* MMP2 (ARMv7) */
 void __init mmp2_init_icu(void)
 {
-	int irq;
+	int irq, end;
 
 	max_icu_nr = 8;
 	mmp_icu_base = ioremap(0xd4282000, 0x1000);
@@ -236,11 +261,12 @@
 						   &icu_data[0]);
 	icu_data[1].reg_status = mmp_icu_base + 0x150;
 	icu_data[1].reg_mask = mmp_icu_base + 0x168;
-	icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
-	icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
+	icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
+				icu_data[0].nr_irqs;
+	icu_data[1].clr_mfp_hwirq = 1;		/* offset to IRQ_MMP2_PMIC_BASE */
 	icu_data[1].nr_irqs = 2;
 	icu_data[1].cascade_irq = 4;
-	icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
+	icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
 	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
 						   icu_data[1].virq_base, 0,
 						   &irq_domain_simple_ops,
@@ -249,7 +275,7 @@
 	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
 	icu_data[2].nr_irqs = 2;
 	icu_data[2].cascade_irq = 5;
-	icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
+	icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
 	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
 						   icu_data[2].virq_base, 0,
 						   &irq_domain_simple_ops,
@@ -258,7 +284,7 @@
 	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
 	icu_data[3].nr_irqs = 3;
 	icu_data[3].cascade_irq = 9;
-	icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
+	icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
 	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
 						   icu_data[3].virq_base, 0,
 						   &irq_domain_simple_ops,
@@ -267,7 +293,7 @@
 	icu_data[4].reg_mask = mmp_icu_base + 0x170;
 	icu_data[4].nr_irqs = 5;
 	icu_data[4].cascade_irq = 17;
-	icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
+	icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
 	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
 						   icu_data[4].virq_base, 0,
 						   &irq_domain_simple_ops,
@@ -276,7 +302,7 @@
 	icu_data[5].reg_mask = mmp_icu_base + 0x174;
 	icu_data[5].nr_irqs = 15;
 	icu_data[5].cascade_irq = 35;
-	icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
+	icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
 	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
 						   icu_data[5].virq_base, 0,
 						   &irq_domain_simple_ops,
@@ -285,7 +311,7 @@
 	icu_data[6].reg_mask = mmp_icu_base + 0x178;
 	icu_data[6].nr_irqs = 2;
 	icu_data[6].cascade_irq = 51;
-	icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
+	icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
 	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
 						   icu_data[6].virq_base, 0,
 						   &irq_domain_simple_ops,
@@ -294,170 +320,176 @@
 	icu_data[7].reg_mask = mmp_icu_base + 0x184;
 	icu_data[7].nr_irqs = 2;
 	icu_data[7].cascade_irq = 55;
-	icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
+	icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
 	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
 						   icu_data[7].virq_base, 0,
 						   &irq_domain_simple_ops,
 						   &icu_data[7]);
-	for (irq = 0; irq < IRQ_MMP2_MUX_END; irq++) {
+	end = icu_data[7].virq_base + icu_data[7].nr_irqs;
+	for (irq = 0; irq < end; irq++) {
 		icu_mask_irq(irq_get_irq_data(irq));
-		switch (irq) {
-		case IRQ_MMP2_PMIC_MUX:
-		case IRQ_MMP2_RTC_MUX:
-		case IRQ_MMP2_KEYPAD_MUX:
-		case IRQ_MMP2_TWSI_MUX:
-		case IRQ_MMP2_MISC_MUX:
-		case IRQ_MMP2_MIPI_HSI1_MUX:
-		case IRQ_MMP2_MIPI_HSI0_MUX:
+		if (irq == icu_data[1].cascade_irq ||
+		    irq == icu_data[2].cascade_irq ||
+		    irq == icu_data[3].cascade_irq ||
+		    irq == icu_data[4].cascade_irq ||
+		    irq == icu_data[5].cascade_irq ||
+		    irq == icu_data[6].cascade_irq ||
+		    irq == icu_data[7].cascade_irq) {
 			irq_set_chip(irq, &icu_irq_chip);
 			irq_set_chained_handler(irq, icu_mux_irq_demux);
-			break;
-		default:
+		} else {
 			irq_set_chip_and_handler(irq, &icu_irq_chip,
 						 handle_level_irq);
-			break;
 		}
 		set_irq_flags(irq, IRQF_VALID);
 	}
 	irq_set_default_host(icu_data[0].domain);
-#ifdef CONFIG_CPU_MMP2
-	icu_irq_chip.irq_set_wake = mmp2_set_wake;
-#endif
+	set_handle_irq(mmp2_handle_irq);
 }
 
 #ifdef CONFIG_OF
-static const struct of_device_id intc_ids[] __initconst = {
-	{ .compatible = "mrvl,mmp-intc", .data = &mmp_conf },
-	{ .compatible = "mrvl,mmp2-intc", .data = &mmp2_conf },
-	{}
-};
-
-static const struct of_device_id mmp_mux_irq_match[] __initconst = {
-	{ .compatible = "mrvl,mmp2-mux-intc" },
-	{}
-};
-
-int __init mmp2_mux_init(struct device_node *parent)
+static int __init mmp_init_bases(struct device_node *node)
 {
-	struct device_node *node;
-	const struct of_device_id *of_id;
-	struct resource res;
-	int i, irq_base, ret, irq;
-	u32 nr_irqs, mfp_irq;
-
-	node = parent;
-	max_icu_nr = 1;
-	for (i = 1; i < MAX_ICU_NR; i++) {
-		node = of_find_matching_node(node, mmp_mux_irq_match);
-		if (!node)
-			break;
-		of_id = of_match_node(&mmp_mux_irq_match[0], node);
-		ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
-					   &nr_irqs);
-		if (ret) {
-			pr_err("Not found mrvl,intc-nr-irqs property\n");
-			ret = -EINVAL;
-			goto err;
-		}
-		ret = of_address_to_resource(node, 0, &res);
-		if (ret < 0) {
-			pr_err("Not found reg property\n");
-			ret = -EINVAL;
-			goto err;
-		}
-		icu_data[i].reg_status = mmp_icu_base + res.start;
-		ret = of_address_to_resource(node, 1, &res);
-		if (ret < 0) {
-			pr_err("Not found reg property\n");
-			ret = -EINVAL;
-			goto err;
-		}
-		icu_data[i].reg_mask = mmp_icu_base + res.start;
-		icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
-		if (!icu_data[i].cascade_irq) {
-			ret = -EINVAL;
-			goto err;
-		}
-
-		irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
-		if (irq_base < 0) {
-			pr_err("Failed to allocate IRQ numbers for mux intc\n");
-			ret = irq_base;
-			goto err;
-		}
-		if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
-					  &mfp_irq)) {
-			icu_data[i].clr_mfp_irq_base = irq_base;
-			icu_data[i].clr_mfp_hwirq = mfp_irq;
-		}
-		irq_set_chained_handler(icu_data[i].cascade_irq,
-					icu_mux_irq_demux);
-		icu_data[i].nr_irqs = nr_irqs;
-		icu_data[i].virq_base = irq_base;
-		icu_data[i].domain = irq_domain_add_legacy(node, nr_irqs,
-							   irq_base, 0,
-							   &mmp_irq_domain_ops,
-							   &icu_data[i]);
-		for (irq = irq_base; irq < irq_base + nr_irqs; irq++)
-			icu_mask_irq(irq_get_irq_data(irq));
-	}
-	max_icu_nr = i;
-	return 0;
-err:
-	of_node_put(node);
-	max_icu_nr = i;
-	return ret;
-}
-
-void __init mmp_dt_irq_init(void)
-{
-	struct device_node *node;
-	const struct of_device_id *of_id;
-	struct mmp_intc_conf *conf;
-	int nr_irqs, irq_base, ret, irq;
-
-	node = of_find_matching_node(NULL, intc_ids);
-	if (!node) {
-		pr_err("Failed to find interrupt controller in arch-mmp\n");
-		return;
-	}
-	of_id = of_match_node(intc_ids, node);
-	conf = of_id->data;
+	int ret, nr_irqs, irq, i = 0;
 
 	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
 	if (ret) {
 		pr_err("Not found mrvl,intc-nr-irqs property\n");
-		return;
+		return ret;
 	}
 
 	mmp_icu_base = of_iomap(node, 0);
 	if (!mmp_icu_base) {
 		pr_err("Failed to get interrupt controller register\n");
-		return;
+		return -ENOMEM;
 	}
 
-	irq_base = irq_alloc_descs(-1, 0, nr_irqs - NR_IRQS_LEGACY, 0);
-	if (irq_base < 0) {
-		pr_err("Failed to allocate IRQ numbers\n");
-		goto err;
-	} else if (irq_base != NR_IRQS_LEGACY) {
-		pr_err("ICU's irqbase should be started from 0\n");
-		goto err;
-	}
-	icu_data[0].conf_enable = conf->conf_enable;
-	icu_data[0].conf_disable = conf->conf_disable;
-	icu_data[0].conf_mask = conf->conf_mask;
-	icu_data[0].nr_irqs = nr_irqs;
 	icu_data[0].virq_base = 0;
-	icu_data[0].domain = irq_domain_add_legacy(node, nr_irqs, 0, 0,
+	icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
 						   &mmp_irq_domain_ops,
 						   &icu_data[0]);
-	irq_set_default_host(icu_data[0].domain);
-	for (irq = 0; irq < nr_irqs; irq++)
-		icu_mask_irq(irq_get_irq_data(irq));
-	mmp2_mux_init(node);
-	return;
+	for (irq = 0; irq < nr_irqs; irq++) {
+		ret = irq_create_mapping(icu_data[0].domain, irq);
+		if (!ret) {
+			pr_err("Failed to map hwirq\n");
+			goto err;
+		}
+		if (!irq)
+			icu_data[0].virq_base = ret;
+	}
+	icu_data[0].nr_irqs = nr_irqs;
+	return 0;
 err:
+	if (icu_data[0].virq_base) {
+		for (i = 0; i < irq; i++)
+			irq_dispose_mapping(icu_data[0].virq_base + i);
+	}
+	irq_domain_remove(icu_data[0].domain);
 	iounmap(mmp_icu_base);
+	return -EINVAL;
 }
+
+static int __init mmp_of_init(struct device_node *node,
+			      struct device_node *parent)
+{
+	int ret;
+
+	ret = mmp_init_bases(node);
+	if (ret < 0)
+		return ret;
+
+	icu_data[0].conf_enable = mmp_conf.conf_enable;
+	icu_data[0].conf_disable = mmp_conf.conf_disable;
+	icu_data[0].conf_mask = mmp_conf.conf_mask;
+	irq_set_default_host(icu_data[0].domain);
+	set_handle_irq(mmp_handle_irq);
+	max_icu_nr = 1;
+	return 0;
+}
+IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
+
+static int __init mmp2_of_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	int ret;
+
+	ret = mmp_init_bases(node);
+	if (ret < 0)
+		return ret;
+
+	icu_data[0].conf_enable = mmp2_conf.conf_enable;
+	icu_data[0].conf_disable = mmp2_conf.conf_disable;
+	icu_data[0].conf_mask = mmp2_conf.conf_mask;
+	irq_set_default_host(icu_data[0].domain);
+	set_handle_irq(mmp2_handle_irq);
+	max_icu_nr = 1;
+	return 0;
+}
+IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
+
+static int __init mmp2_mux_of_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct resource res;
+	int i, ret, irq, j = 0;
+	u32 nr_irqs, mfp_irq;
+
+	if (!parent)
+		return -ENODEV;
+
+	i = max_icu_nr;
+	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
+				   &nr_irqs);
+	if (ret) {
+		pr_err("Not found mrvl,intc-nr-irqs property\n");
+		return -EINVAL;
+	}
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret < 0) {
+		pr_err("Not found reg property\n");
+		return -EINVAL;
+	}
+	icu_data[i].reg_status = mmp_icu_base + res.start;
+	ret = of_address_to_resource(node, 1, &res);
+	if (ret < 0) {
+		pr_err("Not found reg property\n");
+		return -EINVAL;
+	}
+	icu_data[i].reg_mask = mmp_icu_base + res.start;
+	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
+	if (!icu_data[i].cascade_irq)
+		return -EINVAL;
+
+	icu_data[i].virq_base = 0;
+	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
+						   &mmp_irq_domain_ops,
+						   &icu_data[i]);
+	for (irq = 0; irq < nr_irqs; irq++) {
+		ret = irq_create_mapping(icu_data[i].domain, irq);
+		if (!ret) {
+			pr_err("Failed to map hwirq\n");
+			goto err;
+		}
+		if (!irq)
+			icu_data[i].virq_base = ret;
+	}
+	icu_data[i].nr_irqs = nr_irqs;
+	if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
+				  &mfp_irq)) {
+		icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
+		icu_data[i].clr_mfp_hwirq = mfp_irq;
+	}
+	irq_set_chained_handler(icu_data[i].cascade_irq,
+				icu_mux_irq_demux);
+	max_icu_nr++;
+	return 0;
+err:
+	if (icu_data[i].virq_base) {
+		for (j = 0; j < irq; j++)
+			irq_dispose_mapping(icu_data[i].virq_base + j);
+	}
+	irq_domain_remove(icu_data[i].domain);
+	return -EINVAL;
+}
+IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
 #endif
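
The irq-mmp rework above follows the then-current irqchip conventions: the open-coded mmp_dt_irq_init()/mmp2_mux_init() DT walk is split into per-compatible init functions registered with IRQCHIP_DECLARE(), the legacy domains become linear ones, and the exception entry point is installed with set_handle_irq(). A minimal sketch of the same shape for a hypothetical controller (every name below is illustrative, not taken from this patch):

	#include <linux/irq.h>
	#include <linux/irqchip.h>	/* IRQCHIP_DECLARE(); in this era it lived in drivers/irqchip/irqchip.h */
	#include <linux/irqdomain.h>
	#include <linux/of.h>

	/* Called as the linear domain populates each hwirq -> virq mapping. */
	static int demo_irq_map(struct irq_domain *d, unsigned int virq,
				irq_hw_number_t hw)
	{
		irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
		return 0;
	}

	static const struct irq_domain_ops demo_domain_ops = {
		.map	= demo_irq_map,
		.xlate	= irq_domain_xlate_onecell,
	};

	static int __init demo_intc_of_init(struct device_node *node,
					    struct device_node *parent)
	{
		u32 nr_irqs;

		if (of_property_read_u32(node, "demo,intc-nr-irqs", &nr_irqs))
			return -EINVAL;

		/* A linear domain hands out virqs on demand; no fixed base. */
		if (!irq_domain_add_linear(node, nr_irqs, &demo_domain_ops, NULL))
			return -ENOMEM;

		return 0;
	}
	IRQCHIP_DECLARE(demo_intc, "vendor,demo-intc", demo_intc_of_init);
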
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 7f910c7..3c92780 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2295,8 +2295,8 @@
 static void
 hfcpci_softirq(void *arg)
 {
-	(void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
-				      _hfcpci_softirq);
+	WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
+				      _hfcpci_softirq) != 0);
 
 	/* if next event would be in the past ... */
 	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
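
The hfcpci change swaps a (void) cast, which silently discards driver_for_each_device()'s return value, for a WARN_ON_ONCE() that still consumes the value but complains, a single time, should the iteration ever fail. The idiom in isolation (may_fail is a stand-in):

	#include <linux/bug.h>

	static int may_fail(void)
	{
		return 0;	/* stand-in for driver_for_each_device() */
	}

	static void poll_devices(void)
	{
		/* Consume the return value; warn at most once if non-zero. */
		WARN_ON_ONCE(may_fail() != 0);
	}
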
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 1063bab..36817e0 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -314,7 +314,7 @@
 
 							t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
 							QuickHex(t, cs->rcvbuf, cs->rcvidx);
-							debugl1(cs, cs->dlog);
+							debugl1(cs, "%s", cs->dlog);
 						}
 						/* moves received data in sk-buffer */
 						memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
@@ -406,7 +406,7 @@
 
 		t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
 		QuickHex(t, deb_ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 	/* AMD interrupts on */
 	AmdIrqOn(cs);
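
This and the remaining hisax hunks below all plug the same format-string hole: a buffer of logged frame data (cs->dlog, bcs->blog, tmp) was handed to debugl1()/HiSax_putstatus() as the format argument, so any '%' sequence in the data would be parsed as a conversion specifier, corrupting the output or reading garbage off the stack. A self-contained illustration of the bug and the fix, with a hypothetical stand-in logger:

	#include <stdarg.h>
	#include <stdio.h>

	/* Stand-in for debugl1(): a printf-style debug logger. */
	static void debug_log(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		/* Frame dumps can contain arbitrary bytes, including '%'. */
		char dlog[] = "empty_Dfifo cnt: 5 |41 25 %n 100";

		/* BAD:  debug_log(dlog);  the '%n' would be interpreted. */
		/* GOOD: the buffer is only ever consumed as a string argument. */
		debug_log("%s\n", dlog);
		return 0;
	}
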
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index ee9b9a0..d1427bd 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -285,7 +285,7 @@
 		t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
 			     bcs->channel ? 'B' : 'A', count);
 		QuickHex(t, p, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
@@ -345,7 +345,7 @@
 		t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
 			     bcs->channel ? 'B' : 'A', count);
 		QuickHex(t, p, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a..b33f53b 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@
 				ptr--;
 				*ptr++ = '\n';
 				*ptr = 0;
-				HiSax_putstatus(cs, NULL, cs->dlog);
+				HiSax_putstatus(cs, NULL, "%s", cs->dlog);
 			} else
 				HiSax_putstatus(cs, "LogEcho: ",
 						"warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 8d0cf6e..4fc90de 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -427,7 +427,7 @@
 		t += sprintf(t, "hscx_empty_fifo %c cnt %d",
 			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
@@ -469,7 +469,7 @@
 		t += sprintf(t, "hscx_fill_fifo %c cnt %d",
 			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 1df6f9a..2be1c8a 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -535,7 +535,7 @@
 		t = tmp;
 		t += sprintf(tmp, "Arcofi data");
 		QuickHex(t, p, cs->dc.isac.mon_rxp);
-		debugl1(cs, tmp);
+		debugl1(cs, "%s", tmp);
 		if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
 			switch (cs->dc.isac.mon_rx[1]) {
 			case 0x80:
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index d4c98d3..3f84dd8 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -344,7 +344,7 @@
 
 		t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
 		QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
-		debugl1(cs, tmp);
+		debugl1(cs, "%s", tmp);
 	}
 	cs->hw.elsa.rcvcnt = 0;
 }
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 3ccd724..497bd02 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@
 					ptr--;
 					*ptr++ = '\n';
 					*ptr = 0;
-					HiSax_putstatus(cs, NULL, cs->dlog);
+					HiSax_putstatus(cs, NULL, "%s", cs->dlog);
 				} else
 					HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
 			}
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index dc4574f..fa1fefd 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@
 					ptr--;
 					*ptr++ = '\n';
 					*ptr = 0;
-					HiSax_putstatus(cs, NULL, cs->dlog);
+					HiSax_putstatus(cs, NULL, "%s", cs->dlog);
 				} else
 					HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
 			}
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index f398d48..a8d6188 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -75,7 +75,7 @@
 		t += sprintf(t, "hscx_empty_fifo %c cnt %d",
 			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
@@ -115,7 +115,7 @@
 		t += sprintf(t, "hscx_fill_fifo %c cnt %d",
 			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index db5321f..51dae91 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -134,7 +134,7 @@
 
 		t += sprintf(t, "icc_empty_fifo cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
@@ -176,7 +176,7 @@
 
 		t += sprintf(t, "icc_fill_fifo cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 74feb5c..5faa5de 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -260,7 +260,7 @@
 
 		t += sprintf(t, "dch_empty_fifo() cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
@@ -307,7 +307,7 @@
 
 		t += sprintf(t, "dch_fill_fifo() cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
@@ -539,7 +539,7 @@
 
 		t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
@@ -582,7 +582,7 @@
 
 		t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index a365ccc..7fdf78f 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -137,7 +137,7 @@
 
 		t += sprintf(t, "isac_empty_fifo cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
@@ -179,7 +179,7 @@
 
 		t += sprintf(t, "isac_fill_fifo cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 7fdf347..f4956c7 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -74,7 +74,7 @@
 				t = tmp;
 				t += sprintf(t, "sendmbox cnt %d", len);
 				QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
-				debugl1(cs, tmp);
+				debugl1(cs, "%s", tmp);
 				i -= 64;
 			}
 		}
@@ -105,7 +105,7 @@
 				t = tmp;
 				t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
 				QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
-				debugl1(cs, tmp);
+				debugl1(cs, "%s", tmp);
 				i -= 64;
 			}
 		}
@@ -1248,7 +1248,7 @@
 			tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
 				      ireg->iis, ireg->cmsb);
 			QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
-			debugl1(cs, debbuf);
+			debugl1(cs, "%s", debbuf);
 		}
 		break;
 	case ISAR_IIS_INVMSG:
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
index f946c58..e2ae787 100644
--- a/drivers/isdn/hisax/jade.c
+++ b/drivers/isdn/hisax/jade.c
@@ -81,10 +81,7 @@
 	int jade = bcs->hw.hscx.hscx;
 
 	if (cs->debug & L1_DEB_HSCX) {
-		char tmp[40];
-		sprintf(tmp, "jade %c mode %d ichan %d",
-			'A' + jade, mode, bc);
-		debugl1(cs, tmp);
+		debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
 	}
 	bcs->mode = mode;
 	bcs->channel = bc;
@@ -257,23 +254,18 @@
 clear_pending_jade_ints(struct IsdnCardState *cs)
 {
 	int val;
-	char tmp[64];
 
 	cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
 	cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
 
 	val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
-	sprintf(tmp, "jade B ISTA %x", val);
-	debugl1(cs, tmp);
+	debugl1(cs, "jade B ISTA %x", val);
 	val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
-	sprintf(tmp, "jade A ISTA %x", val);
-	debugl1(cs, tmp);
+	debugl1(cs, "jade A ISTA %x", val);
 	val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
-	sprintf(tmp, "jade B STAR %x", val);
-	debugl1(cs, tmp);
+	debugl1(cs, "jade B STAR %x", val);
 	val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
-	sprintf(tmp, "jade A STAR %x", val);
-	debugl1(cs, tmp);
+	debugl1(cs, "jade A STAR %x", val);
 	/* Unmask ints */
 	cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
 	cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
index f521fc8..b930da9 100644
--- a/drivers/isdn/hisax/jade_irq.c
+++ b/drivers/isdn/hisax/jade_irq.c
@@ -65,7 +65,7 @@
 		t += sprintf(t, "jade_empty_fifo %c cnt %d",
 			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
@@ -105,7 +105,7 @@
 		t += sprintf(t, "jade_fill_fifo %c cnt %d",
 			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index 4c1bca5..875402e 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -63,7 +63,7 @@
 {
 	dev_kfree_skb(skb);
 	if (pc->st->l3.debug & L3_DEB_WARN)
-		l3_debug(pc->st, msg);
+		l3_debug(pc->st, "%s", msg);
 	l3_1tr6_release_req(pc, 0, NULL);
 }
 
@@ -161,7 +161,6 @@
 {
 	u_char *p;
 	int bcfound = 0;
-	char tmp[80];
 	struct sk_buff *skb = arg;
 
 	/* Channel Identification */
@@ -214,10 +213,9 @@
 	/* Signal all services, linklevel takes care of Service-Indicator */
 	if (bcfound) {
 		if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
-			sprintf(tmp, "non-digital call: %s -> %s",
+			l3_debug(pc->st, "non-digital call: %s -> %s",
 				pc->para.setup.phone,
 				pc->para.setup.eazmsn);
-			l3_debug(pc->st, tmp);
 		}
 		newl3state(pc, 6);
 		pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
@@ -301,7 +299,7 @@
 {
 	u_char *p;
 	int i, tmpcharge = 0;
-	char a_charge[8], tmp[32];
+	char a_charge[8];
 	struct sk_buff *skb = arg;
 
 	p = skb->data;
@@ -316,8 +314,8 @@
 			pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
 		}
 		if (pc->st->l3.debug & L3_DEB_CHARGE) {
-			sprintf(tmp, "charging info %d", pc->para.chargeinfo);
-			l3_debug(pc->st, tmp);
+			l3_debug(pc->st, "charging info %d",
+				 pc->para.chargeinfo);
 		}
 	} else if (pc->st->l3.debug & L3_DEB_CHARGE)
 		l3_debug(pc->st, "charging info not found");
@@ -399,7 +397,7 @@
 	struct sk_buff *skb = arg;
 	u_char *p;
 	int i, tmpcharge = 0;
-	char a_charge[8], tmp[32];
+	char a_charge[8];
 
 	StopAllL3Timer(pc);
 	p = skb->data;
@@ -414,8 +412,8 @@
 			pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
 		}
 		if (pc->st->l3.debug & L3_DEB_CHARGE) {
-			sprintf(tmp, "charging info %d", pc->para.chargeinfo);
-			l3_debug(pc->st, tmp);
+			l3_debug(pc->st, "charging info %d",
+				 pc->para.chargeinfo);
 		}
 	} else if (pc->st->l3.debug & L3_DEB_CHARGE)
 		l3_debug(pc->st, "charging info not found");
@@ -746,7 +744,6 @@
 	int i, mt, cr;
 	struct l3_process *proc;
 	struct sk_buff *skb = arg;
-	char tmp[80];
 
 	switch (pr) {
 	case (DL_DATA | INDICATION):
@@ -762,26 +759,23 @@
 	}
 	if (skb->len < 4) {
 		if (st->l3.debug & L3_DEB_PROTERR) {
-			sprintf(tmp, "up1tr6 len only %d", skb->len);
-			l3_debug(st, tmp);
+			l3_debug(st, "up1tr6 len only %d", skb->len);
 		}
 		dev_kfree_skb(skb);
 		return;
 	}
 	if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
 		if (st->l3.debug & L3_DEB_PROTERR) {
-			sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d",
+			l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
 				(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
 				skb->data[0], skb->len);
-			l3_debug(st, tmp);
 		}
 		dev_kfree_skb(skb);
 		return;
 	}
 	if (skb->data[1] != 1) {
 		if (st->l3.debug & L3_DEB_PROTERR) {
-			sprintf(tmp, "up1tr6 CR len not 1");
-			l3_debug(st, tmp);
+			l3_debug(st, "up1tr6 CR len not 1");
 		}
 		dev_kfree_skb(skb);
 		return;
@@ -791,9 +785,8 @@
 	if (skb->data[0] == PROTO_DIS_N0) {
 		dev_kfree_skb(skb);
 		if (st->l3.debug & L3_DEB_STATE) {
-			sprintf(tmp, "up1tr6%s N0 mt %x unhandled",
+			l3_debug(st, "up1tr6%s N0 mt %x unhandled",
 				(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
-			l3_debug(st, tmp);
 		}
 	} else if (skb->data[0] == PROTO_DIS_N1) {
 		if (!(proc = getl3proc(st, cr))) {
@@ -801,8 +794,7 @@
 				if (cr < 128) {
 					if (!(proc = new_l3_process(st, cr))) {
 						if (st->l3.debug & L3_DEB_PROTERR) {
-							sprintf(tmp, "up1tr6 no roc mem");
-							l3_debug(st, tmp);
+							l3_debug(st, "up1tr6 no roc mem");
 						}
 						dev_kfree_skb(skb);
 						return;
@@ -821,8 +813,7 @@
 			} else {
 				if (!(proc = new_l3_process(st, cr))) {
 					if (st->l3.debug & L3_DEB_PROTERR) {
-						sprintf(tmp, "up1tr6 no roc mem");
-						l3_debug(st, tmp);
+						l3_debug(st, "up1tr6 no roc mem");
 					}
 					dev_kfree_skb(skb);
 					return;
@@ -837,18 +828,16 @@
 		if (i == ARRAY_SIZE(datastln1)) {
 			dev_kfree_skb(skb);
 			if (st->l3.debug & L3_DEB_STATE) {
-				sprintf(tmp, "up1tr6%sstate %d mt %x unhandled",
+				l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
 					(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
 					proc->state, mt);
-				l3_debug(st, tmp);
 			}
 			return;
 		} else {
 			if (st->l3.debug & L3_DEB_STATE) {
-				sprintf(tmp, "up1tr6%sstate %d mt %x",
+				l3_debug(st, "up1tr6%sstate %d mt %x",
 					(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
 					proc->state, mt);
-				l3_debug(st, tmp);
 			}
 			datastln1[i].rout(proc, pr, skb);
 		}
@@ -861,7 +850,6 @@
 	int i, cr;
 	struct l3_process *proc;
 	struct Channel *chan;
-	char tmp[80];
 
 	if ((DL_ESTABLISH | REQUEST) == pr) {
 		l3_msg(st, pr, NULL);
@@ -888,15 +876,13 @@
 			break;
 	if (i == ARRAY_SIZE(downstl)) {
 		if (st->l3.debug & L3_DEB_STATE) {
-			sprintf(tmp, "down1tr6 state %d prim %d unhandled",
+			l3_debug(st, "down1tr6 state %d prim %d unhandled",
 				proc->state, pr);
-			l3_debug(st, tmp);
 		}
 	} else {
 		if (st->l3.debug & L3_DEB_STATE) {
-			sprintf(tmp, "down1tr6 state %d prim %d",
+			l3_debug(st, "down1tr6 state %d prim %d",
 				proc->state, pr);
-			l3_debug(st, tmp);
 		}
 		downstl[i].rout(proc, pr, arg);
 	}
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index b646eed..233e432 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -176,7 +176,7 @@
 		else
 			j = i;
 		QuickHex(t, p, j);
-		debugl1(cs, tmp);
+		debugl1(cs, "%s", tmp);
 		p += j;
 		i -= j;
 		t = tmp;
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 041bf52..af1b020 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@
 		dp--;
 		*dp++ = '\n';
 		*dp = 0;
-		HiSax_putstatus(cs, NULL, cs->dlog);
+		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
 	} else
 		HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
 }
@@ -1246,7 +1246,7 @@
 	}
 	if (finish) {
 		*dp = 0;
-		HiSax_putstatus(cs, NULL, cs->dlog);
+		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
 		return;
 	}
 	if ((0xfe & buf[0]) == PROTO_DIS_N0) {	/* 1TR6 */
@@ -1509,5 +1509,5 @@
 		dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
 	}
 	*dp = 0;
-	HiSax_putstatus(cs, NULL, cs->dlog);
+	HiSax_putstatus(cs, NULL, "%s", cs->dlog);
 }
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index d8cac69..a858955 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -154,7 +154,7 @@
 
 		t += sprintf(t, "W6692_empty_fifo cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
@@ -196,7 +196,7 @@
 
 		t += sprintf(t, "W6692_fill_fifo cnt %d", count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, cs->dlog);
+		debugl1(cs, "%s", cs->dlog);
 	}
 }
 
@@ -226,7 +226,7 @@
 		t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
 			     bcs->channel + '1', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
@@ -264,7 +264,7 @@
 		t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
 			     bcs->channel + '1', count);
 		QuickHex(t, ptr, count);
-		debugl1(cs, bcs->blog);
+		debugl1(cs, "%s", bcs->blog);
 	}
 }
 
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5ef78ef..2acc43f 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,7 +3,7 @@
 #
 
 dm-mod-y	+= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
+		   dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
 dm-multipath-y	+= dm-path-selector.o dm-mpath.o
 dm-snapshot-y	+= dm-snap.o dm-exception-store.o dm-snap-transient.o \
 		    dm-snap-persistent.o
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0df3ec0..2956976 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -67,9 +67,11 @@
 #define MIGRATION_COUNT_WINDOW 10
 
 /*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
  */
 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
 
 /*
  * FIXME: the cache is read/write for the time being.
@@ -101,6 +103,8 @@
 	struct dm_target *ti;
 	struct dm_target_callbacks callbacks;
 
+	struct dm_cache_metadata *cmd;
+
 	/*
 	 * Metadata is written to this device.
 	 */
@@ -117,11 +121,6 @@
 	struct dm_dev *cache_dev;
 
 	/*
-	 * Cache features such as write-through.
-	 */
-	struct cache_features features;
-
-	/*
 	 * Size of the origin device in _complete_ blocks and native sectors.
 	 */
 	dm_oblock_t origin_blocks;
@@ -138,8 +137,6 @@
 	uint32_t sectors_per_block;
 	int sectors_per_block_shift;
 
-	struct dm_cache_metadata *cmd;
-
 	spinlock_t lock;
 	struct bio_list deferred_bios;
 	struct bio_list deferred_flush_bios;
@@ -148,8 +145,8 @@
 	struct list_head completed_migrations;
 	struct list_head need_commit_migrations;
 	sector_t migration_threshold;
-	atomic_t nr_migrations;
 	wait_queue_head_t migration_wait;
+	atomic_t nr_migrations;
 
 	/*
 	 * cache_size entries, dirty if set
@@ -160,9 +157,16 @@
 	/*
 	 * origin_blocks entries, discarded if set.
 	 */
-	uint32_t discard_block_size; /* a power of 2 times sectors per block */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
+	uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+	/*
+	 * Rather than reconstructing the table line for the status we just
+	 * save it and regurgitate.
+	 */
+	unsigned nr_ctr_args;
+	const char **ctr_args;
 
 	struct dm_kcopyd_client *copier;
 	struct workqueue_struct *wq;
@@ -187,14 +191,12 @@
 	bool loaded_mappings:1;
 	bool loaded_discards:1;
 
-	struct cache_stats stats;
-
 	/*
-	 * Rather than reconstructing the table line for the status we just
-	 * save it and regurgitate.
+	 * Cache features such as write-through.
 	 */
-	unsigned nr_ctr_args;
-	const char **ctr_args;
+	struct cache_features features;
+
+	struct cache_stats stats;
 };
 
 struct per_bio_data {
@@ -1687,24 +1689,25 @@
 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
 			    char **error)
 {
-	unsigned long tmp;
+	unsigned long block_size;
 
 	if (!at_least_one_arg(as, error))
 		return -EINVAL;
 
-	if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
-	    tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
-	    tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
 		*error = "Invalid data block size";
 		return -EINVAL;
 	}
 
-	if (tmp > ca->cache_sectors) {
+	if (block_size > ca->cache_sectors) {
 		*error = "Data block size is larger than the cache device";
 		return -EINVAL;
 	}
 
-	ca->block_size = tmp;
+	ca->block_size = block_size;
 
 	return 0;
 }
@@ -2609,9 +2612,17 @@
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct cache *cache = ti->private;
+	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-	blk_limits_io_min(limits, 0);
-	blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+	/*
+	 * If the system-determined stacked limits are compatible with the
+	 * cache's blocksize (io_opt is a factor) do not override them.
+	 */
+	if (io_opt_sectors < cache->sectors_per_block ||
+	    do_div(io_opt_sectors, cache->sectors_per_block)) {
+		blk_limits_io_min(limits, 0);
+		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+	}
 	set_discard_limits(cache, limits);
 }
 
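cache_io_hints() above stops clobbering io_min/io_opt when the stacked limits are already compatible, i.e. when the cache block size divides io_opt exactly. Note the double duty of do_div(): it divides its u64 argument in place and returns the remainder, so a non-zero result means io_opt is not a whole multiple of the block size. A condensed sketch of just that test:

	#include <linux/types.h>
	#include <asm/div64.h>

	/*
	 * True when io_opt (in sectors) is a whole multiple of the cache
	 * block size, so the stacked limits can be left alone.  Sketch only;
	 * the real check is inline in cache_io_hints().
	 */
	static bool io_opt_is_factor(uint64_t io_opt_sectors,
				     uint32_t sectors_per_block)
	{
		if (io_opt_sectors < sectors_per_block)
			return false;
		return do_div(io_opt_sectors, sectors_per_block) == 0;
	}
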
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6d2d41a..0fce0bc 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1645,20 +1645,14 @@
 	}
 
 	ret = -ENOMEM;
-	cc->io_queue = alloc_workqueue("kcryptd_io",
-				       WQ_NON_REENTRANT|
-				       WQ_MEM_RECLAIM,
-				       1);
+	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
 	cc->crypt_queue = alloc_workqueue("kcryptd",
-					  WQ_NON_REENTRANT|
-					  WQ_CPU_INTENSIVE|
-					  WQ_MEM_RECLAIM,
-					  1);
+					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f1b7586..afe0814 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -877,7 +877,7 @@
 	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
 
 	if (new_data < param->data ||
-	    invalid_str(new_data, (void *) param + param_size) ||
+	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
 	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
 		DMWARN("Invalid new mapped device name or uuid string supplied.");
 		return -EINVAL;
@@ -1262,44 +1262,37 @@
 
 	r = dm_table_create(&t, get_mode(param), param->target_count, md);
 	if (r)
-		goto out;
+		goto err;
 
+	/* Protect md->type and md->queue against concurrent table loads. */
+	dm_lock_md_type(md);
 	r = populate_table(t, param, param_size);
-	if (r) {
-		dm_table_destroy(t);
-		goto out;
-	}
+	if (r)
+		goto err_unlock_md_type;
 
 	immutable_target_type = dm_get_immutable_target_type(md);
 	if (immutable_target_type &&
 	    (immutable_target_type != dm_table_get_immutable_target_type(t))) {
 		DMWARN("can't replace immutable target type %s",
 		       immutable_target_type->name);
-		dm_table_destroy(t);
 		r = -EINVAL;
-		goto out;
+		goto err_unlock_md_type;
 	}
 
-	/* Protect md->type and md->queue against concurrent table loads. */
-	dm_lock_md_type(md);
 	if (dm_get_md_type(md) == DM_TYPE_NONE)
 		/* Initial table load: acquire type of table. */
 		dm_set_md_type(md, dm_table_get_type(t));
 	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
 		DMWARN("can't change device type after initial table load.");
-		dm_table_destroy(t);
-		dm_unlock_md_type(md);
 		r = -EINVAL;
-		goto out;
+		goto err_unlock_md_type;
 	}
 
 	/* setup md->queue to reflect md's type (may block) */
 	r = dm_setup_md_queue(md);
 	if (r) {
 		DMWARN("unable to set up device queue for new table.");
-		dm_table_destroy(t);
-		dm_unlock_md_type(md);
-		goto out;
+		goto err_unlock_md_type;
 	}
 	dm_unlock_md_type(md);
 
@@ -1309,9 +1302,8 @@
 	if (!hc || hc->md != md) {
 		DMWARN("device has been removed from the dev hash table.");
 		up_write(&_hash_lock);
-		dm_table_destroy(t);
 		r = -ENXIO;
-		goto out;
+		goto err_destroy_table;
 	}
 
 	if (hc->new_map)
@@ -1322,7 +1314,6 @@
 	param->flags |= DM_INACTIVE_PRESENT_FLAG;
 	__dev_status(md, param);
 
-out:
 	if (old_map) {
 		dm_sync_table(md);
 		dm_table_destroy(old_map);
@@ -1330,6 +1321,15 @@
 
 	dm_put(md);
 
+	return 0;
+
+err_unlock_md_type:
+	dm_unlock_md_type(md);
+err_destroy_table:
+	dm_table_destroy(t);
+err:
+	dm_put(md);
+
 	return r;
 }
 
@@ -1455,20 +1455,26 @@
 	return 0;
 }
 
-static bool buffer_test_overflow(char *result, unsigned maxlen)
-{
-	return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
 /*
- * Process device-mapper dependent messages.
+ * Process device-mapper dependent messages.  Messages prefixed with '@'
+ * are processed by the DM core.  All others are delivered to the target.
  * Returns a number <= 1 if message was processed by device mapper.
  * Returns 2 if message should be delivered to the target.
  */
 static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
 			  char *result, unsigned maxlen)
 {
-	return 2;
+	int r;
+
+	if (**argv != '@')
+		return 2; /* no '@' prefix, deliver to target */
+
+	r = dm_stats_message(md, argc, argv, result, maxlen);
+	if (r < 2)
+		return r;
+
+	DMERR("Unsupported message sent to DM core: %s", argv[0]);
+	return -EINVAL;
 }
 
 /*
@@ -1542,7 +1548,7 @@
 
 	if (r == 1) {
 		param->flags |= DM_DATA_OUT_FLAG;
-		if (buffer_test_overflow(result, maxlen))
+		if (dm_message_test_buffer_overflow(result, maxlen))
 			param->flags |= DM_BUFFER_FULL_FLAG;
 		else
 			param->data_size = param->data_start + strlen(result) + 1;
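
The table_load() changes above convert failure branches that each repeated their own dm_table_destroy()/dm_unlock_md_type() cleanup into a single goto ladder, where each label undoes exactly one prior step and falls through to the next. The shape of the idiom, reduced to a runnable toy (all functions here are hypothetical):

	#include <stdio.h>

	static int step_a(void) { return 0; }
	static int step_b(void) { return 0; }
	static int step_c(void) { return -1; }	/* fails, to exercise unwinding */
	static void undo_b(void) { puts("undo b"); }
	static void undo_a(void) { puts("undo a"); }

	static int setup(void)
	{
		int r;

		r = step_a();
		if (r)
			goto err;
		r = step_b();
		if (r)
			goto err_undo_a;
		r = step_c();
		if (r)
			goto err_undo_b;
		return 0;

		/* Labels run in reverse order of construction, falling through. */
	err_undo_b:
		undo_b();
	err_undo_a:
		undo_a();
	err:
		return r;
	}

	int main(void)
	{
		return setup() ? 1 : 0;
	}
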
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d581fe5..3a7cade 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -833,8 +833,7 @@
 		goto bad_slab;
 
 	INIT_WORK(&kc->kcopyd_work, do_work);
-	kc->kcopyd_wq = alloc_workqueue("kcopyd",
-					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+	kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
 	if (!kc->kcopyd_wq)
 		goto bad_workqueue;
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 699b5be..9584443 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1080,8 +1080,7 @@
 	ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
 	ti->discard_zeroes_data_unsupported = true;
 
-	ms->kmirrord_wq = alloc_workqueue("kmirrord",
-					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
 	if (!ms->kmirrord_wq) {
 		DMERR("couldn't start kmirrord");
 		r = -ENOMEM;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
new file mode 100644
index 0000000..8ae31e8
--- /dev/null
+++ b/drivers/md/dm-stats.c
@@ -0,0 +1,969 @@
+#include <linux/errno.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/threads.h>
+#include <linux/preempt.h>
+#include <linux/irqflags.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+
+#include "dm.h"
+#include "dm-stats.h"
+
+#define DM_MSG_PREFIX "stats"
+
+static int dm_stat_need_rcu_barrier;
+
+/*
+ * Using 64-bit values to avoid overflow (which is a
+ * problem that block/genhd.c's IO accounting has).
+ */
+struct dm_stat_percpu {
+	unsigned long long sectors[2];
+	unsigned long long ios[2];
+	unsigned long long merges[2];
+	unsigned long long ticks[2];
+	unsigned long long io_ticks[2];
+	unsigned long long io_ticks_total;
+	unsigned long long time_in_queue;
+};
+
+struct dm_stat_shared {
+	atomic_t in_flight[2];
+	unsigned long stamp;
+	struct dm_stat_percpu tmp;
+};
+
+struct dm_stat {
+	struct list_head list_entry;
+	int id;
+	size_t n_entries;
+	sector_t start;
+	sector_t end;
+	sector_t step;
+	const char *program_id;
+	const char *aux_data;
+	struct rcu_head rcu_head;
+	size_t shared_alloc_size;
+	size_t percpu_alloc_size;
+	struct dm_stat_percpu *stat_percpu[NR_CPUS];
+	struct dm_stat_shared stat_shared[0];
+};
+
+struct dm_stats_last_position {
+	sector_t last_sector;
+	unsigned last_rw;
+};
+
+/*
+ * A typo on the command line could possibly make the kernel run out of memory
+ * and crash. To prevent the crash we account all used memory. We fail if we
+ * exhaust 1/4 of all memory or 1/2 of vmalloc space.
+ */
+#define DM_STATS_MEMORY_FACTOR		4
+#define DM_STATS_VMALLOC_FACTOR		2
+
+static DEFINE_SPINLOCK(shared_memory_lock);
+
+static unsigned long shared_memory_amount;
+
+static bool __check_shared_memory(size_t alloc_size)
+{
+	size_t a;
+
+	a = shared_memory_amount + alloc_size;
+	if (a < shared_memory_amount)
+		return false;
+	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+		return false;
+#ifdef CONFIG_MMU
+	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
+		return false;
+#endif
+	return true;
+}
+
+static bool check_shared_memory(size_t alloc_size)
+{
+	bool ret;
+
+	spin_lock_irq(&shared_memory_lock);
+
+	ret = __check_shared_memory(alloc_size);
+
+	spin_unlock_irq(&shared_memory_lock);
+
+	return ret;
+}
+
+static bool claim_shared_memory(size_t alloc_size)
+{
+	spin_lock_irq(&shared_memory_lock);
+
+	if (!__check_shared_memory(alloc_size)) {
+		spin_unlock_irq(&shared_memory_lock);
+		return false;
+	}
+
+	shared_memory_amount += alloc_size;
+
+	spin_unlock_irq(&shared_memory_lock);
+
+	return true;
+}
+
+static void free_shared_memory(size_t alloc_size)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&shared_memory_lock, flags);
+
+	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
+		spin_unlock_irqrestore(&shared_memory_lock, flags);
+		DMCRIT("Memory usage accounting bug.");
+		return;
+	}
+
+	shared_memory_amount -= alloc_size;
+
+	spin_unlock_irqrestore(&shared_memory_lock, flags);
+}
+
+static void *dm_kvzalloc(size_t alloc_size, int node)
+{
+	void *p;
+
+	if (!claim_shared_memory(alloc_size))
+		return NULL;
+
+	if (alloc_size <= KMALLOC_MAX_SIZE) {
+		p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
+		if (p)
+			return p;
+	}
+	p = vzalloc_node(alloc_size, node);
+	if (p)
+		return p;
+
+	free_shared_memory(alloc_size);
+
+	return NULL;
+}
+
+static void dm_kvfree(void *ptr, size_t alloc_size)
+{
+	if (!ptr)
+		return;
+
+	free_shared_memory(alloc_size);
+
+	if (is_vmalloc_addr(ptr))
+		vfree(ptr);
+	else
+		kfree(ptr);
+}
+
+static void dm_stat_free(struct rcu_head *head)
+{
+	int cpu;
+	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+
+	kfree(s->program_id);
+	kfree(s->aux_data);
+	for_each_possible_cpu(cpu)
+		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+	dm_kvfree(s, s->shared_alloc_size);
+}
+
+static int dm_stat_in_flight(struct dm_stat_shared *shared)
+{
+	return atomic_read(&shared->in_flight[READ]) +
+	       atomic_read(&shared->in_flight[WRITE]);
+}
+
+void dm_stats_init(struct dm_stats *stats)
+{
+	int cpu;
+	struct dm_stats_last_position *last;
+
+	mutex_init(&stats->mutex);
+	INIT_LIST_HEAD(&stats->list);
+	stats->last = alloc_percpu(struct dm_stats_last_position);
+	for_each_possible_cpu(cpu) {
+		last = per_cpu_ptr(stats->last, cpu);
+		last->last_sector = (sector_t)ULLONG_MAX;
+		last->last_rw = UINT_MAX;
+	}
+}
+
+void dm_stats_cleanup(struct dm_stats *stats)
+{
+	size_t ni;
+	struct dm_stat *s;
+	struct dm_stat_shared *shared;
+
+	while (!list_empty(&stats->list)) {
+		s = container_of(stats->list.next, struct dm_stat, list_entry);
+		list_del(&s->list_entry);
+		for (ni = 0; ni < s->n_entries; ni++) {
+			shared = &s->stat_shared[ni];
+			if (WARN_ON(dm_stat_in_flight(shared))) {
+				DMCRIT("leaked in-flight counter at index %lu "
+				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
+				       (unsigned long)ni,
+				       (unsigned long long)s->start,
+				       (unsigned long long)s->end,
+				       (unsigned long long)s->step,
+				       atomic_read(&shared->in_flight[READ]),
+				       atomic_read(&shared->in_flight[WRITE]));
+			}
+		}
+		dm_stat_free(&s->rcu_head);
+	}
+	free_percpu(stats->last);
+}
+
+static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+			   sector_t step, const char *program_id, const char *aux_data,
+			   void (*suspend_callback)(struct mapped_device *),
+			   void (*resume_callback)(struct mapped_device *),
+			   struct mapped_device *md)
+{
+	struct list_head *l;
+	struct dm_stat *s, *tmp_s;
+	sector_t n_entries;
+	size_t ni;
+	size_t shared_alloc_size;
+	size_t percpu_alloc_size;
+	struct dm_stat_percpu *p;
+	int cpu;
+	int ret_id;
+	int r;
+
+	if (end < start || !step)
+		return -EINVAL;
+
+	n_entries = end - start;
+	if (dm_sector_div64(n_entries, step))
+		n_entries++;
+
+	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
+		return -EOVERFLOW;
+
+	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
+		return -EOVERFLOW;
+
+	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
+	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
+		return -EOVERFLOW;
+
+	if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
+		return -ENOMEM;
+
+	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
+	if (!s)
+		return -ENOMEM;
+
+	s->n_entries = n_entries;
+	s->start = start;
+	s->end = end;
+	s->step = step;
+	s->shared_alloc_size = shared_alloc_size;
+	s->percpu_alloc_size = percpu_alloc_size;
+
+	s->program_id = kstrdup(program_id, GFP_KERNEL);
+	if (!s->program_id) {
+		r = -ENOMEM;
+		goto out;
+	}
+	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
+	if (!s->aux_data) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	for (ni = 0; ni < n_entries; ni++) {
+		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+	}
+
+	for_each_possible_cpu(cpu) {
+		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
+		if (!p) {
+			r = -ENOMEM;
+			goto out;
+		}
+		s->stat_percpu[cpu] = p;
+	}
+
+	/*
+	 * Suspend/resume to make sure there is no i/o in flight,
+	 * so that newly created statistics will be exact.
+	 *
+	 * (note: we couldn't suspend earlier because we must not
+	 * allocate memory while suspended)
+	 */
+	suspend_callback(md);
+
+	mutex_lock(&stats->mutex);
+	s->id = 0;
+	list_for_each(l, &stats->list) {
+		tmp_s = container_of(l, struct dm_stat, list_entry);
+		if (WARN_ON(tmp_s->id < s->id)) {
+			r = -EINVAL;
+			goto out_unlock_resume;
+		}
+		if (tmp_s->id > s->id)
+			break;
+		if (unlikely(s->id == INT_MAX)) {
+			r = -ENFILE;
+			goto out_unlock_resume;
+		}
+		s->id++;
+	}
+	ret_id = s->id;
+	list_add_tail_rcu(&s->list_entry, l);
+	mutex_unlock(&stats->mutex);
+
+	resume_callback(md);
+
+	return ret_id;
+
+out_unlock_resume:
+	mutex_unlock(&stats->mutex);
+	resume_callback(md);
+out:
+	dm_stat_free(&s->rcu_head);
+	return r;
+}
+
+static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
+{
+	struct dm_stat *s;
+
+	list_for_each_entry(s, &stats->list, list_entry) {
+		if (s->id > id)
+			break;
+		if (s->id == id)
+			return s;
+	}
+
+	return NULL;
+}
+
+static int dm_stats_delete(struct dm_stats *stats, int id)
+{
+	struct dm_stat *s;
+	int cpu;
+
+	mutex_lock(&stats->mutex);
+
+	s = __dm_stats_find(stats, id);
+	if (!s) {
+		mutex_unlock(&stats->mutex);
+		return -ENOENT;
+	}
+
+	list_del_rcu(&s->list_entry);
+	mutex_unlock(&stats->mutex);
+
+	/*
+	 * vfree can't be called from RCU callback
+	 */
+	for_each_possible_cpu(cpu)
+		if (is_vmalloc_addr(s->stat_percpu[cpu]))
+			goto do_sync_free;
+	if (is_vmalloc_addr(s)) {
+do_sync_free:
+		synchronize_rcu_expedited();
+		dm_stat_free(&s->rcu_head);
+	} else {
+		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+		call_rcu(&s->rcu_head, dm_stat_free);
+	}
+	return 0;
+}
+
+static int dm_stats_list(struct dm_stats *stats, const char *program,
+			 char *result, unsigned maxlen)
+{
+	struct dm_stat *s;
+	sector_t len;
+	unsigned sz = 0;
+
+	/*
+	 * Output format:
+	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+	 */
+
+	mutex_lock(&stats->mutex);
+	list_for_each_entry(s, &stats->list, list_entry) {
+		if (!program || !strcmp(program, s->program_id)) {
+			len = s->end - s->start;
+			DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+				(unsigned long long)s->start,
+				(unsigned long long)len,
+				(unsigned long long)s->step,
+				s->program_id,
+				s->aux_data);
+		}
+	}
+	mutex_unlock(&stats->mutex);
+
+	return 1;
+}
+
+static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
+{
+	/*
+	 * This is racy, but so is part_round_stats_single.
+	 */
+	unsigned long now = jiffies;
+	unsigned in_flight_read;
+	unsigned in_flight_write;
+	unsigned long difference = now - shared->stamp;
+
+	if (!difference)
+		return;
+	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+	if (in_flight_read)
+		p->io_ticks[READ] += difference;
+	if (in_flight_write)
+		p->io_ticks[WRITE] += difference;
+	if (in_flight_read + in_flight_write) {
+		p->io_ticks_total += difference;
+		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
+	}
+	shared->stamp = now;
+}
+
+static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+			      unsigned long bi_rw, sector_t len, bool merged,
+			      bool end, unsigned long duration)
+{
+	unsigned long idx = bi_rw & REQ_WRITE;
+	struct dm_stat_shared *shared = &s->stat_shared[entry];
+	struct dm_stat_percpu *p;
+
+	/*
+	 * For strict correctness we should use local_irq_disable/enable
+	 * instead of preempt_disable/enable.
+	 *
+	 * This is racy if the driver finishes bios from non-interrupt
+	 * context as well as from interrupt context or from more different
+	 * context as well as from interrupt context, or from several
+	 * different interrupts.
+	 * However, the race only results in not counting some events,
+	 * so it is acceptable.
+	 *
+	 * part_stat_lock()/part_stat_unlock() have this race too.
+	 */
+	preempt_disable();
+	p = &s->stat_percpu[smp_processor_id()][entry];
+
+	if (!end) {
+		dm_stat_round(shared, p);
+		atomic_inc(&shared->in_flight[idx]);
+	} else {
+		dm_stat_round(shared, p);
+		atomic_dec(&shared->in_flight[idx]);
+		p->sectors[idx] += len;
+		p->ios[idx] += 1;
+		p->merges[idx] += merged;
+		p->ticks[idx] += duration;
+	}
+
+	preempt_enable();
+}
+
+static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+			  sector_t bi_sector, sector_t end_sector,
+			  bool end, unsigned long duration,
+			  struct dm_stats_aux *stats_aux)
+{
+	sector_t rel_sector, offset, todo, fragment_len;
+	size_t entry;
+
+	if (end_sector <= s->start || bi_sector >= s->end)
+		return;
+	if (unlikely(bi_sector < s->start)) {
+		rel_sector = 0;
+		todo = end_sector - s->start;
+	} else {
+		rel_sector = bi_sector - s->start;
+		todo = end_sector - bi_sector;
+	}
+	if (unlikely(end_sector > s->end))
+		todo -= (end_sector - s->end);
+
+	offset = dm_sector_div64(rel_sector, s->step);
+	entry = rel_sector;
+	do {
+		if (WARN_ON_ONCE(entry >= s->n_entries)) {
+			DMCRIT("Invalid area access in region id %d", s->id);
+			return;
+		}
+		fragment_len = todo;
+		if (fragment_len > s->step - offset)
+			fragment_len = s->step - offset;
+		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
+				  stats_aux->merged, end, duration);
+		todo -= fragment_len;
+		entry++;
+		offset = 0;
+	} while (unlikely(todo != 0));
+}
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+			 sector_t bi_sector, unsigned bi_sectors, bool end,
+			 unsigned long duration, struct dm_stats_aux *stats_aux)
+{
+	struct dm_stat *s;
+	sector_t end_sector;
+	struct dm_stats_last_position *last;
+
+	if (unlikely(!bi_sectors))
+		return;
+
+	end_sector = bi_sector + bi_sectors;
+
+	if (!end) {
+		/*
+		 * A race condition can at worst result in the merged flag being
+		 * misrepresented, so we don't have to disable preemption here.
+		 */
+		last = __this_cpu_ptr(stats->last);
+		stats_aux->merged =
+			(bi_sector == ACCESS_ONCE(last->last_sector) &&
+			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
+			  (ACCESS_ONCE(last->last_rw) &
+			   (REQ_WRITE | REQ_DISCARD))));
+		ACCESS_ONCE(last->last_sector) = end_sector;
+		ACCESS_ONCE(last->last_rw) = bi_rw;
+	}
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(s, &stats->list, list_entry)
+		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);
+
+	rcu_read_unlock();
+}
+
+static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
+						   struct dm_stat *s, size_t x)
+{
+	int cpu;
+	struct dm_stat_percpu *p;
+
+	local_irq_disable();
+	p = &s->stat_percpu[smp_processor_id()][x];
+	dm_stat_round(shared, p);
+	local_irq_enable();
+
+	memset(&shared->tmp, 0, sizeof(shared->tmp));
+	for_each_possible_cpu(cpu) {
+		p = &s->stat_percpu[cpu][x];
+		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
+		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
+		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
+		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
+		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
+		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
+		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
+		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
+		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
+		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
+		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
+		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+	}
+}
+
+static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+			    bool init_tmp_percpu_totals)
+{
+	size_t x;
+	struct dm_stat_shared *shared;
+	struct dm_stat_percpu *p;
+
+	for (x = idx_start; x < idx_end; x++) {
+		shared = &s->stat_shared[x];
+		if (init_tmp_percpu_totals)
+			__dm_stat_init_temporary_percpu_totals(shared, s, x);
+		local_irq_disable();
+		p = &s->stat_percpu[smp_processor_id()][x];
+		p->sectors[READ] -= shared->tmp.sectors[READ];
+		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
+		p->ios[READ] -= shared->tmp.ios[READ];
+		p->ios[WRITE] -= shared->tmp.ios[WRITE];
+		p->merges[READ] -= shared->tmp.merges[READ];
+		p->merges[WRITE] -= shared->tmp.merges[WRITE];
+		p->ticks[READ] -= shared->tmp.ticks[READ];
+		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
+		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
+		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
+		p->io_ticks_total -= shared->tmp.io_ticks_total;
+		p->time_in_queue -= shared->tmp.time_in_queue;
+		local_irq_enable();
+	}
+}
+
+static int dm_stats_clear(struct dm_stats *stats, int id)
+{
+	struct dm_stat *s;
+
+	mutex_lock(&stats->mutex);
+
+	s = __dm_stats_find(stats, id);
+	if (!s) {
+		mutex_unlock(&stats->mutex);
+		return -ENOENT;
+	}
+
+	__dm_stat_clear(s, 0, s->n_entries, true);
+
+	mutex_unlock(&stats->mutex);
+
+	return 1;
+}
+
+/*
+ * This is like jiffies_to_msecs(), but works for 64-bit values.
+ */
+static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
+{
+	unsigned long long result = 0;
+	unsigned mult;
+
+	if (j)
+		result = jiffies_to_msecs(j & 0x3fffff);
+	if (j >= 1 << 22) {
+		mult = jiffies_to_msecs(1 << 22);
+		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
+	}
+	if (j >= 1ULL << 44)
+		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
+
+	return result;
+}
+
+static int dm_stats_print(struct dm_stats *stats, int id,
+			  size_t idx_start, size_t idx_len,
+			  bool clear, char *result, unsigned maxlen)
+{
+	unsigned sz = 0;
+	struct dm_stat *s;
+	size_t x;
+	sector_t start, end, step;
+	size_t idx_end;
+	struct dm_stat_shared *shared;
+
+	/*
+	 * Output format:
+	 *   <start_sector>+<length> counters
+	 */
+
+	mutex_lock(&stats->mutex);
+
+	s = __dm_stats_find(stats, id);
+	if (!s) {
+		mutex_unlock(&stats->mutex);
+		return -ENOENT;
+	}
+
+	idx_end = idx_start + idx_len;
+	if (idx_end < idx_start ||
+	    idx_end > s->n_entries)
+		idx_end = s->n_entries;
+
+	if (idx_start > idx_end)
+		idx_start = idx_end;
+
+	step = s->step;
+	start = s->start + (step * idx_start);
+
+	for (x = idx_start; x < idx_end; x++, start = end) {
+		shared = &s->stat_shared[x];
+		end = start + step;
+		if (unlikely(end > s->end))
+			end = s->end;
+
+		__dm_stat_init_temporary_percpu_totals(shared, s, x);
+
+		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
+		       (unsigned long long)start,
+		       (unsigned long long)step,
+		       shared->tmp.ios[READ],
+		       shared->tmp.merges[READ],
+		       shared->tmp.sectors[READ],
+		       dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
+		       shared->tmp.ios[WRITE],
+		       shared->tmp.merges[WRITE],
+		       shared->tmp.sectors[WRITE],
+		       dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
+		       dm_stat_in_flight(shared),
+		       dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
+		       dm_jiffies_to_msec64(shared->tmp.time_in_queue),
+		       dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
+		       dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));
+
+		if (unlikely(sz + 1 >= maxlen))
+			goto buffer_overflow;
+	}
+
+	if (clear)
+		__dm_stat_clear(s, idx_start, idx_end, false);
+
+buffer_overflow:
+	mutex_unlock(&stats->mutex);
+
+	return 1;
+}
+
+static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
+{
+	struct dm_stat *s;
+	const char *new_aux_data;
+
+	mutex_lock(&stats->mutex);
+
+	s = __dm_stats_find(stats, id);
+	if (!s) {
+		mutex_unlock(&stats->mutex);
+		return -ENOENT;
+	}
+
+	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
+	if (!new_aux_data) {
+		mutex_unlock(&stats->mutex);
+		return -ENOMEM;
+	}
+
+	kfree(s->aux_data);
+	s->aux_data = new_aux_data;
+
+	mutex_unlock(&stats->mutex);
+
+	return 0;
+}
+
+static int message_stats_create(struct mapped_device *md,
+				unsigned argc, char **argv,
+				char *result, unsigned maxlen)
+{
+	int id;
+	char dummy;
+	unsigned long long start, end, len, step;
+	unsigned divisor;
+	const char *program_id, *aux_data;
+
+	/*
+	 * Input format:
+	 *   <range> <step> [<program_id> [<aux_data>]]
+	 */
+
+	if (argc < 3 || argc > 5)
+		return -EINVAL;
+
+	if (!strcmp(argv[1], "-")) {
+		start = 0;
+		len = dm_get_size(md);
+		if (!len)
+			len = 1;
+	} else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
+		   start != (sector_t)start || len != (sector_t)len)
+		return -EINVAL;
+
+	end = start + len;
+	if (start >= end)
+		return -EINVAL;
+
+	if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+		step = end - start;
+		if (do_div(step, divisor))
+			step++;
+		if (!step)
+			step = 1;
+	} else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
+		   step != (sector_t)step || !step)
+		return -EINVAL;
+
+	program_id = "-";
+	aux_data = "-";
+
+	if (argc > 3)
+		program_id = argv[3];
+
+	if (argc > 4)
+		aux_data = argv[4];
+
+	/*
+	 * If a buffer overflow happens after we created the region,
+	 * it's too late (userspace would retry with a larger
+	 * buffer, but the region id that caused the overflow is already
+	 * leaked).  So we must detect buffer overflow in advance.
+	 */
+	snprintf(result, maxlen, "%d", INT_MAX);
+	if (dm_message_test_buffer_overflow(result, maxlen))
+		return 1;
+
+	id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
+			     dm_internal_suspend, dm_internal_resume, md);
+	if (id < 0)
+		return id;
+
+	snprintf(result, maxlen, "%d", id);
+
+	return 1;
+}
+
+static int message_stats_delete(struct mapped_device *md,
+				unsigned argc, char **argv)
+{
+	int id;
+	char dummy;
+
+	if (argc != 2)
+		return -EINVAL;
+
+	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+		return -EINVAL;
+
+	return dm_stats_delete(dm_get_stats(md), id);
+}
+
+static int message_stats_clear(struct mapped_device *md,
+			       unsigned argc, char **argv)
+{
+	int id;
+	char dummy;
+
+	if (argc != 2)
+		return -EINVAL;
+
+	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+		return -EINVAL;
+
+	return dm_stats_clear(dm_get_stats(md), id);
+}
+
+static int message_stats_list(struct mapped_device *md,
+			      unsigned argc, char **argv,
+			      char *result, unsigned maxlen)
+{
+	int r;
+	const char *program = NULL;
+
+	if (argc < 1 || argc > 2)
+		return -EINVAL;
+
+	if (argc > 1) {
+		program = kstrdup(argv[1], GFP_KERNEL);
+		if (!program)
+			return -ENOMEM;
+	}
+
+	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
+
+	kfree(program);
+
+	return r;
+}
+
+static int message_stats_print(struct mapped_device *md,
+			       unsigned argc, char **argv, bool clear,
+			       char *result, unsigned maxlen)
+{
+	int id;
+	char dummy;
+	unsigned long idx_start = 0, idx_len = ULONG_MAX;
+
+	if (argc != 2 && argc != 4)
+		return -EINVAL;
+
+	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+		return -EINVAL;
+
+	if (argc > 3) {
+		if (strcmp(argv[2], "-") &&
+		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
+			return -EINVAL;
+		if (strcmp(argv[3], "-") &&
+		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
+			return -EINVAL;
+	}
+
+	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
+			      result, maxlen);
+}
+
+static int message_stats_set_aux(struct mapped_device *md,
+				 unsigned argc, char **argv)
+{
+	int id;
+	char dummy;
+
+	if (argc != 3)
+		return -EINVAL;
+
+	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+		return -EINVAL;
+
+	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+}
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+		     char *result, unsigned maxlen)
+{
+	int r;
+
+	if (dm_request_based(md)) {
+		DMWARN("Statistics are only supported for bio-based devices");
+		return -EOPNOTSUPP;
+	}
+
+	/* All messages here must start with '@' */
+	if (!strcasecmp(argv[0], "@stats_create"))
+		r = message_stats_create(md, argc, argv, result, maxlen);
+	else if (!strcasecmp(argv[0], "@stats_delete"))
+		r = message_stats_delete(md, argc, argv);
+	else if (!strcasecmp(argv[0], "@stats_clear"))
+		r = message_stats_clear(md, argc, argv);
+	else if (!strcasecmp(argv[0], "@stats_list"))
+		r = message_stats_list(md, argc, argv, result, maxlen);
+	else if (!strcasecmp(argv[0], "@stats_print"))
+		r = message_stats_print(md, argc, argv, false, result, maxlen);
+	else if (!strcasecmp(argv[0], "@stats_print_clear"))
+		r = message_stats_print(md, argc, argv, true, result, maxlen);
+	else if (!strcasecmp(argv[0], "@stats_set_aux"))
+		r = message_stats_set_aux(md, argc, argv);
+	else
+		return 2; /* this wasn't a stats message */
+
+	if (r == -EINVAL)
+		DMWARN("Invalid parameters for message %s", argv[0]);
+
+	return r;
+}
+
+int __init dm_statistics_init(void)
+{
+	dm_stat_need_rcu_barrier = 0;
+	return 0;
+}
+
+void dm_statistics_exit(void)
+{
+	if (dm_stat_need_rcu_barrier)
+		rcu_barrier();
+	if (WARN_ON(shared_memory_amount))
+		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
+}
+
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
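
dm_kvzalloc()/dm_kvfree() above wrap a common kernel pattern, one later centralized as the generic kvmalloc helpers: try the slab allocator for sizes it can satisfy, fall back to vmalloc, and pick the matching free primitive with is_vmalloc_addr(). The pairing in isolation, with the statistics-specific memory accounting stripped out:

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Zeroed allocation that prefers kmalloc and falls back to vmalloc. */
	static void *kv_zalloc(size_t size, int node)
	{
		void *p = NULL;

		if (size <= KMALLOC_MAX_SIZE)
			p = kzalloc_node(size, GFP_KERNEL | __GFP_NORETRY |
					 __GFP_NOWARN, node);
		if (!p)
			p = vzalloc_node(size, node);
		return p;
	}

	static void kv_free(void *ptr)
	{
		if (is_vmalloc_addr(ptr))	/* true only for vmalloc'd memory */
			vfree(ptr);
		else
			kfree(ptr);		/* kfree(NULL) is a no-op */
	}
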
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
new file mode 100644
index 0000000..e7c4984
--- /dev/null
+++ b/drivers/md/dm-stats.h
@@ -0,0 +1,40 @@
+#ifndef DM_STATS_H
+#define DM_STATS_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+int dm_statistics_init(void);
+void dm_statistics_exit(void);
+
+struct dm_stats {
+	struct mutex mutex;
+	struct list_head list;	/* list of struct dm_stat */
+	struct dm_stats_last_position __percpu *last;
+	sector_t last_sector;
+	unsigned last_rw;
+};
+
+struct dm_stats_aux {
+	bool merged;
+};
+
+void dm_stats_init(struct dm_stats *st);
+void dm_stats_cleanup(struct dm_stats *st);
+
+struct mapped_device;
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+		     char *result, unsigned maxlen);
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+			 sector_t bi_sector, unsigned bi_sectors, bool end,
+			 unsigned long duration, struct dm_stats_aux *aux);
+
+static inline bool dm_stats_used(struct dm_stats *st)
+{
+	return !list_empty(&st->list);
+}
+
+#endif
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d907ca6..73c1712 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,6 +4,7 @@
  * This file is released under the GPL.
  */
 
+#include "dm.h"
 #include <linux/device-mapper.h>
 
 #include <linux/module.h>
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f221812..8f87835 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -860,14 +860,17 @@
 static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
-	unsigned bio_based = 0, request_based = 0;
+	unsigned bio_based = 0, request_based = 0, hybrid = 0;
 	struct dm_target *tgt;
 	struct dm_dev_internal *dd;
 	struct list_head *devices;
+	unsigned live_md_type;
 
 	for (i = 0; i < t->num_targets; i++) {
 		tgt = t->targets + i;
-		if (dm_target_request_based(tgt))
+		if (dm_target_hybrid(tgt))
+			hybrid = 1;
+		else if (dm_target_request_based(tgt))
 			request_based = 1;
 		else
 			bio_based = 1;
@@ -879,6 +882,19 @@
 		}
 	}
 
+	if (hybrid && !bio_based && !request_based) {
+		/*
+		 * The targets can work either way.
+		 * Determine the type from the live device.
+		 * Default to bio-based if device is new.
+		 */
+		live_md_type = dm_get_md_type(t->md);
+		if (live_md_type == DM_TYPE_REQUEST_BASED)
+			request_based = 1;
+		else
+			bio_based = 1;
+	}
+
 	if (bio_based) {
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 37ba5db..242e3ce 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,19 @@
 	return -EIO;
 }
 
+static int io_err_map_rq(struct dm_target *ti, struct request *clone,
+			 union map_info *map_context)
+{
+	return -EIO;
+}
+
 static struct target_type error_target = {
 	.name = "error",
-	.version = {1, 1, 0},
+	.version = {1, 2, 0},
 	.ctr  = io_err_ctr,
 	.dtr  = io_err_dtr,
 	.map  = io_err_map,
+	.map_rq = io_err_map_rq,
 };
 
 int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 88f2f80..ed06342 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -887,7 +887,8 @@
 
 	r = dm_pool_commit_metadata(pool->pmd);
 	if (r)
-		DMERR_LIMIT("commit failed: error = %d", r);
+		DMERR_LIMIT("%s: commit failed: error = %d",
+			    dm_device_name(pool->pool_md), r);
 
 	return r;
 }
@@ -917,6 +918,13 @@
 	unsigned long flags;
 	struct pool *pool = tc->pool;
 
+	/*
+	 * Once no_free_space is set we must not allow allocation to succeed.
+	 * Otherwise it is difficult to explain, debug, test and support.
+	 */
+	if (pool->no_free_space)
+		return -ENOSPC;
+
 	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
 	if (r)
 		return r;
@@ -931,31 +939,30 @@
 	}
 
 	if (!free_blocks) {
-		if (pool->no_free_space)
+		/*
+		 * Try to commit to see if that will free up some
+		 * more space.
+		 */
+		(void) commit_or_fallback(pool);
+
+		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+		if (r)
+			return r;
+
+		/*
+		 * If we still have no space we set a flag to avoid
+		 * doing all this checking and return -ENOSPC.  This
+		 * flag serves as a latch that disallows allocations from
+		 * this pool until the admin takes action (e.g. resize or
+		 * table reload).
+		 */
+		if (!free_blocks) {
+			DMWARN("%s: no free space available.",
+			       dm_device_name(pool->pool_md));
+			spin_lock_irqsave(&pool->lock, flags);
+			pool->no_free_space = 1;
+			spin_unlock_irqrestore(&pool->lock, flags);
 			return -ENOSPC;
-		else {
-			/*
-			 * Try to commit to see if that will free up some
-			 * more space.
-			 */
-			(void) commit_or_fallback(pool);
-
-			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
-			if (r)
-				return r;
-
-			/*
-			 * If we still have no space we set a flag to avoid
-			 * doing all this checking and return -ENOSPC.
-			 */
-			if (!free_blocks) {
-				DMWARN("%s: no free space available.",
-				       dm_device_name(pool->pool_md));
-				spin_lock_irqsave(&pool->lock, flags);
-				pool->no_free_space = 1;
-				spin_unlock_irqrestore(&pool->lock, flags);
-				return -ENOSPC;
-			}
 		}
 	}
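The rewritten branch above is a check/commit/recheck latch: test for free space, force a metadata commit to reclaim anything pending, test again, and only then trip the sticky no_free_space flag so that later allocations fail fast until the admin resizes the pool or reloads the table. A hedged sketch of the shape, with hypothetical names throughout:

#include <errno.h>
#include <stdbool.h>

struct pool_like {
	bool no_free_space;
	int free_blocks;
};

static void reclaim(struct pool_like *p)
{
	/* stand-in for committing metadata to free deferred blocks */
}

static int try_alloc(struct pool_like *p)
{
	if (p->no_free_space)
		return -ENOSPC;			/* latched: fail fast */
	if (p->free_blocks > 0)
		return 0;
	reclaim(p);				/* one reclaim attempt */
	if (p->free_blocks > 0)
		return 0;
	p->no_free_space = true;		/* latch until the admin acts */
	return -ENOSPC;
}
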
 
@@ -1085,6 +1092,7 @@
 {
 	int r;
 	dm_block_t data_block;
+	struct pool *pool = tc->pool;
 
 	r = alloc_data_block(tc, &data_block);
 	switch (r) {
@@ -1094,13 +1102,14 @@
 		break;
 
 	case -ENOSPC:
-		no_space(tc->pool, cell);
+		no_space(pool, cell);
 		break;
 
 	default:
 		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
 			    __func__, r);
-		cell_error(tc->pool, cell);
+		set_pool_mode(pool, PM_READ_ONLY);
+		cell_error(pool, cell);
 		break;
 	}
 }
@@ -1386,7 +1395,8 @@
 
 	switch (mode) {
 	case PM_FAIL:
-		DMERR("switching pool to failure mode");
+		DMERR("%s: switching pool to failure mode",
+		      dm_device_name(pool->pool_md));
 		pool->process_bio = process_bio_fail;
 		pool->process_discard = process_bio_fail;
 		pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1394,10 +1404,12 @@
 		break;
 
 	case PM_READ_ONLY:
-		DMERR("switching pool to read-only mode");
+		DMERR("%s: switching pool to read-only mode",
+		      dm_device_name(pool->pool_md));
 		r = dm_pool_abort_metadata(pool->pmd);
 		if (r) {
-			DMERR("aborting transaction failed");
+			DMERR("%s: aborting transaction failed",
+			      dm_device_name(pool->pool_md));
 			set_pool_mode(pool, PM_FAIL);
 		} else {
 			dm_pool_metadata_read_only(pool->pmd);
@@ -2156,19 +2168,22 @@
 
 	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
 	if (r) {
-		DMERR("failed to retrieve data device size");
+		DMERR("%s: failed to retrieve data device size",
+		      dm_device_name(pool->pool_md));
 		return r;
 	}
 
 	if (data_size < sb_data_size) {
-		DMERR("pool target (%llu blocks) too small: expected %llu",
+		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
+		      dm_device_name(pool->pool_md),
 		      (unsigned long long)data_size, sb_data_size);
 		return -EINVAL;
 
 	} else if (data_size > sb_data_size) {
 		r = dm_pool_resize_data_dev(pool->pmd, data_size);
 		if (r) {
-			DMERR("failed to resize data device");
+			DMERR("%s: failed to resize data device",
+			      dm_device_name(pool->pool_md));
 			set_pool_mode(pool, PM_READ_ONLY);
 			return r;
 		}
@@ -2192,19 +2207,22 @@
 
 	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
 	if (r) {
-		DMERR("failed to retrieve data device size");
+		DMERR("%s: failed to retrieve metadata device size",
+		      dm_device_name(pool->pool_md));
 		return r;
 	}
 
 	if (metadata_dev_size < sb_metadata_dev_size) {
-		DMERR("metadata device (%llu blocks) too small: expected %llu",
+		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
+		      dm_device_name(pool->pool_md),
 		      metadata_dev_size, sb_metadata_dev_size);
 		return -EINVAL;
 
 	} else if (metadata_dev_size > sb_metadata_dev_size) {
 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
 		if (r) {
-			DMERR("failed to resize metadata device");
+			DMERR("%s: failed to resize metadata device",
+			      dm_device_name(pool->pool_md));
 			return r;
 		}
 
@@ -2530,37 +2548,43 @@
 
 		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
 		if (r) {
-			DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
+			      dm_device_name(pool->pool_md), r);
 			goto err;
 		}
 
 		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
 		if (r) {
-			DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
+			      dm_device_name(pool->pool_md), r);
 			goto err;
 		}
 
 		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
 		if (r) {
-			DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
+			      dm_device_name(pool->pool_md), r);
 			goto err;
 		}
 
 		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
 		if (r) {
-			DMERR("dm_pool_get_free_block_count returned %d", r);
+			DMERR("%s: dm_pool_get_free_block_count returned %d",
+			      dm_device_name(pool->pool_md), r);
 			goto err;
 		}
 
 		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
 		if (r) {
-			DMERR("dm_pool_get_data_dev_size returned %d", r);
+			DMERR("%s: dm_pool_get_data_dev_size returned %d",
+			      dm_device_name(pool->pool_md), r);
 			goto err;
 		}
 
 		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
 		if (r) {
-			DMERR("dm_pool_get_metadata_snap returned %d", r);
+			DMERR("%s: dm_pool_get_metadata_snap returned %d",
+			      dm_device_name(pool->pool_md), r);
 			goto err;
 		}
 
@@ -2648,9 +2672,17 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
+	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-	blk_limits_io_min(limits, 0);
-	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+	/*
+	 * If the system-determined stacked limits are compatible with the
+	 * pool's blocksize (io_opt is a factor) do not override them.
+	 */
+	if (io_opt_sectors < pool->sectors_per_block ||
+	    do_div(io_opt_sectors, pool->sectors_per_block)) {
+		blk_limits_io_min(limits, 0);
+		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+	}
 
 	/*
 	 * pt->adjusted_pf is a staging area for the actual features to use.
@@ -2669,7 +2701,7 @@
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 8, 0},
+	.version = {1, 9, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2956,7 +2988,7 @@
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 8, 0},
+	.version = {1, 9, 0},
 	.module	= THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
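One detail in the pool_io_hints change above is easy to misread: the kernel's do_div(n, base) divides n in place and returns the remainder, so a non-zero return means io_opt is not a whole multiple of the pool's block size and the limits get overridden. A small userspace analogue (my_do_div is a stand-in for the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divides 'n' in place
 * and returns the remainder, which is how pool_io_hints uses it. */
static uint32_t my_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t io_opt_sectors = 1024;

	/* remainder 0: io_opt is a whole multiple of the block size,
	 * so the stacked limits would be left alone */
	printf("rem=%u\n", my_do_div(&io_opt_sectors, 128));
	return 0;
}
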
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9e39d2b..6a5e9ed 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,6 +60,7 @@
 	struct bio *bio;
 	unsigned long start_time;
 	spinlock_t endio_lock;
+	struct dm_stats_aux stats_aux;
 };
 
 /*
@@ -198,6 +199,8 @@
 
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
+
+	struct dm_stats stats;
 };
 
 /*
@@ -269,6 +272,7 @@
 	dm_io_init,
 	dm_kcopyd_init,
 	dm_interface_init,
+	dm_statistics_init,
 };
 
 static void (*_exits[])(void) = {
@@ -279,6 +283,7 @@
 	dm_io_exit,
 	dm_kcopyd_exit,
 	dm_interface_exit,
+	dm_statistics_exit,
 };
 
 static int __init dm_init(void)
@@ -384,6 +389,16 @@
 	return r;
 }
 
+sector_t dm_get_size(struct mapped_device *md)
+{
+	return get_capacity(md->disk);
+}
+
+struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+	return &md->stats;
+}
+
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
@@ -466,8 +481,9 @@
 static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
+	struct bio *bio = io->bio;
 	int cpu;
-	int rw = bio_data_dir(io->bio);
+	int rw = bio_data_dir(bio);
 
 	io->start_time = jiffies;
 
@@ -476,6 +492,10 @@
 	part_stat_unlock();
 	atomic_set(&dm_disk(md)->part0.in_flight[rw],
 		atomic_inc_return(&md->pending[rw]));
+
+	if (unlikely(dm_stats_used(&md->stats)))
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+				    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -491,6 +511,10 @@
 	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
 	part_stat_unlock();
 
+	if (unlikely(dm_stats_used(&md->stats)))
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+				    bio_sectors(bio), true, duration, &io->stats_aux);
+
 	/*
 	 * After this is decremented the bio must not be touched if it is
 	 * a flush.
@@ -1519,7 +1543,7 @@
 	return;
 }
 
-static int dm_request_based(struct mapped_device *md)
+int dm_request_based(struct mapped_device *md)
 {
 	return blk_queue_stackable(md->queue);
 }
@@ -1946,8 +1970,7 @@
 	add_disk(md->disk);
 	format_dev_t(md->name, MKDEV(_major, minor));
 
-	md->wq = alloc_workqueue("kdmflush",
-				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
 	if (!md->wq)
 		goto bad_thread;
 
@@ -1959,6 +1982,8 @@
 	md->flush_bio.bi_bdev = md->bdev;
 	md->flush_bio.bi_rw = WRITE_FLUSH;
 
+	dm_stats_init(&md->stats);
+
 	/* Populate the mapping, nobody knows we exist yet */
 	spin_lock(&_minor_lock);
 	old_md = idr_replace(&_minor_idr, md, minor);
@@ -2010,6 +2035,7 @@
 
 	put_disk(md->disk);
 	blk_cleanup_queue(md->queue);
+	dm_stats_cleanup(&md->stats);
 	module_put(THIS_MODULE);
 	kfree(md);
 }
@@ -2151,7 +2177,7 @@
 	/*
 	 * Wipe any geometry if the size of the table changed.
 	 */
-	if (size != get_capacity(md->disk))
+	if (size != dm_get_size(md))
 		memset(&md->geometry, 0, sizeof(md->geometry));
 
 	__set_size(md, size);
@@ -2236,11 +2262,13 @@
 
 void dm_set_md_type(struct mapped_device *md, unsigned type)
 {
+	BUG_ON(!mutex_is_locked(&md->type_lock));
 	md->type = type;
 }
 
 unsigned dm_get_md_type(struct mapped_device *md)
 {
+	BUG_ON(!mutex_is_locked(&md->type_lock));
 	return md->type;
 }
 
@@ -2695,6 +2723,38 @@
 	return r;
 }
 
+/*
+ * Internal suspend/resume works like userspace-driven suspend. It waits
+ * until all bios finish and prevents issuing new bios to the target drivers.
+ * It may be used only from the kernel.
+ *
+ * Internal suspend holds md->suspend_lock, which prevents interaction with
+ * userspace-driven suspend.
+ */
+
+void dm_internal_suspend(struct mapped_device *md)
+{
+	mutex_lock(&md->suspend_lock);
+	if (dm_suspended_md(md))
+		return;
+
+	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+	synchronize_srcu(&md->io_barrier);
+	flush_workqueue(md->wq);
+	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+}
+
+void dm_internal_resume(struct mapped_device *md)
+{
+	if (dm_suspended_md(md))
+		goto done;
+
+	dm_queue_flush(md);
+
+done:
+	mutex_unlock(&md->suspend_lock);
+}
+
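dm_internal_suspend() and dm_internal_resume() give kernel code the same quiescing guarantee userspace gets from the suspend ioctl. A hedged sketch of the calling pattern (the update body is illustrative, not taken from this patch):

/* Sketch only: quiesce a mapped_device around an in-kernel update. */
static void update_under_internal_suspend(struct mapped_device *md)
{
	dm_internal_suspend(md);	/* blocks new bios, drains old ones */
	/* ... swap or resize per-device state here ... */
	dm_internal_resume(md);
}
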
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 45b97da..5e604cc 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -16,6 +16,8 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 
+#include "dm-stats.h"
+
 /*
  * Suspend feature flags
  */
@@ -89,10 +91,21 @@
 #define dm_target_is_valid(t) ((t)->table)
 
 /*
+ * To check whether the target type is bio-based or not (request-based).
+ */
+#define dm_target_bio_based(t) ((t)->type->map != NULL)
+
+/*
  * To check whether the target type is request-based or not (bio-based).
  */
 #define dm_target_request_based(t) ((t)->type->map_rq != NULL)
 
+/*
+ * To check whether the target type is a hybrid (capable of being
+ * either request-based or bio-based).
+ */
+#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
+
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
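These macros classify a target purely by which handlers its target_type provides, and the error target updated earlier in this patch is the canonical hybrid. A hedged illustration (handler names are placeholders):

static int some_map(struct dm_target *ti, struct bio *bio);
static int some_map_rq(struct dm_target *ti, struct request *clone,
		       union map_info *map_context);

/* dm_target_bio_based() is true for the first and third entries,
 * dm_target_request_based() for the second and third, so only
 * 'hybrid_tgt' satisfies dm_target_hybrid(). */
static struct target_type bio_only = { .map = some_map };
static struct target_type rq_only = { .map_rq = some_map_rq };
static struct target_type hybrid_tgt = {
	.map	= some_map,
	.map_rq	= some_map_rq,
};
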
@@ -146,10 +159,16 @@
 void dm_destroy_immediate(struct mapped_device *md);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
+int dm_request_based(struct mapped_device *md);
+sector_t dm_get_size(struct mapped_device *md);
+struct dm_stats *dm_get_stats(struct mapped_device *md);
 
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 		      unsigned cookie);
 
+void dm_internal_suspend(struct mapped_device *md);
+void dm_internal_resume(struct mapped_device *md);
+
 int dm_io_init(void);
 void dm_io_exit(void);
 
@@ -162,4 +181,12 @@
 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
+/*
+ * Helpers that are used by DM core
+ */
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+	return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
 #endif
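dm_message_test_buffer_overflow() is meant to be consulted after each append to a message's result buffer: it fires when the buffer is full (or nearly so), at which point the handler should stop emitting. One plausible pattern, as a hedged sketch (the emitted text is illustrative):

/* Sketch: append to 'result' and stop cleanly once the helper reports
 * that the buffer cannot take another byte. */
static void emit_status(char *result, unsigned maxlen)
{
	unsigned sz = 0;

	sz += scnprintf(result + sz, maxlen - sz, "%u entries\n", 42);
	if (dm_message_test_buffer_overflow(result, maxlen))
		return;		/* userspace can retry with a larger buffer */
	/* ... emit further lines the same way ... */
}
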
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9f13e13..adf4d7e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1180,7 +1180,7 @@
 			mddev->bitmap_info.offset =
 				mddev->bitmap_info.default_offset;
 			mddev->bitmap_info.space =
-				mddev->bitmap_info.space;
+				mddev->bitmap_info.default_space;
 		}
 
 	} else if (mddev->pers == NULL) {
@@ -3429,7 +3429,7 @@
 		mddev->safemode_delay = (msec*HZ)/1000;
 		if (mddev->safemode_delay == 0)
 			mddev->safemode_delay = 1;
-		if (mddev->safemode_delay < old_delay)
+		if (mddev->safemode_delay < old_delay || old_delay == 0)
 			md_safemode_timeout((unsigned long)mddev);
 	}
 	return len;
@@ -5144,7 +5144,7 @@
 	
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	
-	if (mddev->flags)
+	if (mddev->flags & MD_UPDATE_SB_FLAGS)
 		md_update_sb(mddev, 0);
 
 	md_new_event(mddev);
@@ -5289,7 +5289,7 @@
 	md_super_wait(mddev);
 
 	if (mddev->ro == 0 &&
-	    (!mddev->in_sync || mddev->flags)) {
+	    (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
 		/* mark array as shutdown cleanly */
 		mddev->in_sync = 1;
 		md_update_sb(mddev, 1);
@@ -5337,8 +5337,14 @@
 		err = -EBUSY;
 		goto out;
 	}
-	if (bdev)
-		sync_blockdev(bdev);
+	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+		/* Someone opened the device since we flushed it,
+		 * so the page cache could be dirty and it is too late
+		 * to flush. So abort.
+		 */
+		mutex_unlock(&mddev->open_mutex);
+		return -EBUSY;
+	}
 	if (mddev->pers) {
 		__md_stop_writes(mddev);
 
@@ -5373,14 +5379,14 @@
 		mutex_unlock(&mddev->open_mutex);
 		return -EBUSY;
 	}
-	if (bdev)
-		/* It is possible IO was issued on some other
-		 * open file which was closed before we took ->open_mutex.
-		 * As that was not the last close __blkdev_put will not
-		 * have called sync_blockdev, so we must.
+	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+		/* Someone opened the device since we flushed it,
+		 * so the page cache could be dirty and it is too late
+		 * to flush. So abort.
 		 */
-		sync_blockdev(bdev);
-
+		mutex_unlock(&mddev->open_mutex);
+		return -EBUSY;
+	}
 	if (mddev->pers) {
 		if (mddev->ro)
 			set_disk_ro(disk, 0);
@@ -5628,10 +5634,7 @@
 	char *ptr, *buf = NULL;
 	int err = -ENOMEM;
 
-	if (md_allow_write(mddev))
-		file = kmalloc(sizeof(*file), GFP_NOIO);
-	else
-		file = kmalloc(sizeof(*file), GFP_KERNEL);
+	file = kmalloc(sizeof(*file), GFP_NOIO);
 
 	if (!file)
 		goto out;
@@ -6420,6 +6423,20 @@
 						 !test_bit(MD_RECOVERY_NEEDED,
 							   &mddev->flags),
 						 msecs_to_jiffies(5000));
+	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
+		/* Need to flush the page cache, and ensure that no one
+		 * else opens the device and writes to it
+		 */
+		mutex_lock(&mddev->open_mutex);
+		if (atomic_read(&mddev->openers) > 1) {
+			mutex_unlock(&mddev->open_mutex);
+			err = -EBUSY;
+			goto abort;
+		}
+		set_bit(MD_STILL_CLOSED, &mddev->flags);
+		mutex_unlock(&mddev->open_mutex);
+		sync_blockdev(bdev);
+	}
 	err = mddev_lock(mddev);
 	if (err) {
 		printk(KERN_INFO 
@@ -6673,6 +6690,7 @@
 
 	err = 0;
 	atomic_inc(&mddev->openers);
+	clear_bit(MD_STILL_CLOSED, &mddev->flags);
 	mutex_unlock(&mddev->open_mutex);
 
 	check_disk_change(bdev);
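Taken together, the md.c hunks above form a small open-race detector: the STOP_ARRAY path sets MD_STILL_CLOSED under open_mutex and flushes the block device, md_open() clears the bit, and the stop path later trusts its flush only if the bit survived. Condensed into a hedged sketch (the helper name is hypothetical):

/* Sketch: 0 if the earlier flush is still trustworthy, -EBUSY if some
 * md_open() cleared MD_STILL_CLOSED after we flushed. */
static int md_flush_still_valid(struct mddev *mddev)
{
	int ret = 0;

	mutex_lock(&mddev->open_mutex);
	if (!test_bit(MD_STILL_CLOSED, &mddev->flags))
		ret = -EBUSY;	/* page cache may be dirty again */
	mutex_unlock(&mddev->open_mutex);
	return ret;
}
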
@@ -7817,7 +7835,7 @@
 				sysfs_notify_dirent_safe(mddev->sysfs_state);
 		}
 
-		if (mddev->flags)
+		if (mddev->flags & MD_UPDATE_SB_FLAGS)
 			md_update_sb(mddev, 0);
 
 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 20f02c0..608050c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -204,12 +204,16 @@
 	struct md_personality		*pers;
 	dev_t				unit;
 	int				md_minor;
-	struct list_head 		disks;
+	struct list_head		disks;
 	unsigned long			flags;
 #define MD_CHANGE_DEVS	0	/* Some device status has changed */
 #define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
 #define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
+#define MD_UPDATE_SB_FLAGS (1 | 2 | 4)	/* If these are set, md_update_sb needed */
 #define MD_ARRAY_FIRST_USE 3    /* First use of array, needs initialization */
+#define MD_STILL_CLOSED	4	/* If set, then array has not been opened since
+				 * md_ioctl checked on it.
+				 */
 
 	int				suspended;
 	atomic_t			active_io;
@@ -218,7 +222,7 @@
 						       * are happening, so run/
 						       * takeover/stop are not safe
 						       */
-	int				ready; /* See when safe to pass 
+	int				ready; /* See when safe to pass
 						* IO requests down */
 	struct gendisk			*gendisk;
 
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 81b5138..a7e8bf2 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -615,6 +615,11 @@
 }
 EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
 
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+	dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
 	bm->read_only = true;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index be5bff6..9a82083 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,6 +108,11 @@
 int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
 			   struct dm_block *superblock);
 
+/*
+ * Request that data be prefetched into the cache.
+ */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
 /*
  * Switches the bm to a read only mode.  Once read-only mode
  * has been entered the following functions will return -EPERM.
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 3586542..468e371 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -161,6 +161,7 @@
 };
 
 struct del_stack {
+	struct dm_btree_info *info;
 	struct dm_transaction_manager *tm;
 	int top;
 	struct frame spine[MAX_SPINE_DEPTH];
@@ -183,6 +184,20 @@
 	return s->top >= 0;
 }
 
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+	unsigned i;
+	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+	for (i = 0; i < f->nr_children; i++)
+		dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+	return f->level < (info->levels - 1);
+}
+
 static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 {
 	int r;
@@ -205,6 +220,7 @@
 		dm_tm_dec(s->tm, b);
 
 	else {
+		uint32_t flags;
 		struct frame *f = s->spine + ++s->top;
 
 		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
@@ -217,6 +233,10 @@
 		f->level = level;
 		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
 		f->current_child = 0;
+
+		flags = le32_to_cpu(f->n->header.flags);
+		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+			prefetch_children(s, f);
 	}
 
 	return 0;
@@ -230,11 +250,6 @@
 	dm_tm_unlock(s->tm, f->b);
 }
 
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
-	return f->level < (info->levels - 1);
-}
-
 int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 {
 	int r;
@@ -243,6 +258,7 @@
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
+	s->info = info;
 	s->tm = info->tm;
 	s->top = -1;
 
@@ -287,7 +303,7 @@
 					info->value_type.dec(info->value_type.context,
 							     value_ptr(f->n, i));
 			}
-			f->current_child = f->nr_children;
+			pop_frame(s);
 		}
 	}
 
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 3e7a88d..6058569 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -292,16 +292,11 @@
 	return dm_tm_unlock(ll->tm, blk);
 }
 
-int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
+				      uint32_t *result)
 {
 	__le32 le_rc;
-	int r = sm_ll_lookup_bitmap(ll, b, result);
-
-	if (r)
-		return r;
-
-	if (*result != 3)
-		return r;
+	int r;
 
 	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
 	if (r < 0)
@@ -312,6 +307,19 @@
 	return r;
 }
 
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+	int r = sm_ll_lookup_bitmap(ll, b, result);
+
+	if (r)
+		return r;
+
+	if (*result != 3)
+		return r;
+
+	return sm_ll_lookup_big_ref_count(ll, b, result);
+}
+
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 			  dm_block_t end, dm_block_t *result)
 {
@@ -372,11 +380,12 @@
 	return -ENOSPC;
 }
 
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
-		 uint32_t ref_count, enum allocation_event *ev)
+static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+			uint32_t (*mutator)(void *context, uint32_t old),
+			void *context, enum allocation_event *ev)
 {
 	int r;
-	uint32_t bit, old;
+	uint32_t bit, old, ref_count;
 	struct dm_block *nb;
 	dm_block_t index = b;
 	struct disk_index_entry ie_disk;
@@ -399,6 +408,14 @@
 	bm_le = dm_bitmap_data(nb);
 	old = sm_lookup_bitmap(bm_le, bit);
 
+	if (old > 2) {
+		r = sm_ll_lookup_big_ref_count(ll, b, &old);
+		if (r < 0)
+			return r;
+	}
+
+	ref_count = mutator(context, old);
+
 	if (ref_count <= 2) {
 		sm_set_bitmap(bm_le, bit, ref_count);
 
@@ -448,31 +465,35 @@
 	return ll->save_ie(ll, index, &ie_disk);
 }
 
+static uint32_t set_ref_count(void *context, uint32_t old)
+{
+	return *((uint32_t *) context);
+}
+
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+		 uint32_t ref_count, enum allocation_event *ev)
+{
+	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
+}
+
+static uint32_t inc_ref_count(void *context, uint32_t old)
+{
+	return old + 1;
+}
+
 int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
 {
-	int r;
-	uint32_t rc;
+	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+}
 
-	r = sm_ll_lookup(ll, b, &rc);
-	if (r)
-		return r;
-
-	return sm_ll_insert(ll, b, rc + 1, ev);
+static uint32_t dec_ref_count(void *context, uint32_t old)
+{
+	return old - 1;
 }
 
 int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
 {
-	int r;
-	uint32_t rc;
-
-	r = sm_ll_lookup(ll, b, &rc);
-	if (r)
-		return r;
-
-	if (!rc)
-		return -EINVAL;
-
-	return sm_ll_insert(ll, b, rc - 1, ev);
+	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
 }
 
 int sm_ll_commit(struct ll_disk *ll)
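The refactor above collapses three lookup-then-modify paths into one read-modify-write core parameterized by a mutator callback, so set/inc/dec each shrink to a one-line function and the bitmap/btree lookup happens only once per operation. The shape, reduced to a hedged userspace sketch:

#include <stdint.h>

typedef uint32_t (*mutator_fn)(void *context, uint32_t old);

/* One RMW core, many one-line mutators; no double lookup. */
static void counter_mutate(uint32_t *slot, mutator_fn fn, void *context)
{
	*slot = fn(context, *slot);
}

static uint32_t set_value(void *context, uint32_t old)
{
	return *(uint32_t *)context;
}

static uint32_t inc_one(void *context, uint32_t old)
{
	return old + 1;
}
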
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78ea443..7ff4f25 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/nodemask.h>
 #include <trace/events/block.h>
 
 #include "md.h"
@@ -60,6 +61,10 @@
 #include "raid0.h"
 #include "bitmap.h"
 
+#define cpu_to_group(cpu) cpu_to_node(cpu)
+#define ANY_GROUP NUMA_NO_NODE
+
+static struct workqueue_struct *raid5_wq;
 /*
  * Stripe cache
  */
@@ -72,6 +77,7 @@
 #define BYPASS_THRESHOLD	1
 #define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
 #define HASH_MASK		(NR_HASH - 1)
+#define MAX_STRIPE_BATCH	8
 
 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
 {
@@ -200,6 +206,49 @@
 	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 }
 
+static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
+{
+	struct r5conf *conf = sh->raid_conf;
+	struct r5worker_group *group;
+	int thread_cnt;
+	int i, cpu = sh->cpu;
+
+	if (!cpu_online(cpu)) {
+		cpu = cpumask_any(cpu_online_mask);
+		sh->cpu = cpu;
+	}
+
+	if (list_empty(&sh->lru)) {
+		struct r5worker_group *group;
+		group = conf->worker_groups + cpu_to_group(cpu);
+		list_add_tail(&sh->lru, &group->handle_list);
+		group->stripes_cnt++;
+		sh->group = group;
+	}
+
+	if (conf->worker_cnt_per_group == 0) {
+		md_wakeup_thread(conf->mddev->thread);
+		return;
+	}
+
+	group = conf->worker_groups + cpu_to_group(sh->cpu);
+
+	group->workers[0].working = true;
+	/* at least one worker should run to avoid race */
+	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
+
+	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
+	/* wakeup more workers */
+	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
+		if (group->workers[i].working == false) {
+			group->workers[i].working = true;
+			queue_work_on(sh->cpu, raid5_wq,
+				      &group->workers[i].work);
+			thread_cnt--;
+		}
+	}
+}
+
 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
 {
 	BUG_ON(!list_empty(&sh->lru));
@@ -214,7 +263,12 @@
 		else {
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			clear_bit(STRIPE_BIT_DELAY, &sh->state);
-			list_add_tail(&sh->lru, &conf->handle_list);
+			if (conf->worker_cnt_per_group == 0) {
+				list_add_tail(&sh->lru, &conf->handle_list);
+			} else {
+				raid5_wakeup_stripe_thread(sh);
+				return;
+			}
 		}
 		md_wakeup_thread(conf->mddev->thread);
 	} else {
@@ -239,12 +293,62 @@
 		do_release_stripe(conf, sh);
 }
 
+static struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+	struct llist_node *new_head = NULL;
+
+	while (head) {
+		struct llist_node *tmp = head;
+		head = head->next;
+		tmp->next = new_head;
+		new_head = tmp;
+	}
+
+	return new_head;
+}
+
+/* should hold conf->device_lock already */
+static int release_stripe_list(struct r5conf *conf)
+{
+	struct stripe_head *sh;
+	int count = 0;
+	struct llist_node *head;
+
+	head = llist_del_all(&conf->released_stripes);
+	head = llist_reverse_order(head);
+	while (head) {
+		sh = llist_entry(head, struct stripe_head, release_list);
+		head = llist_next(head);
+		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
+		smp_mb();
+		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
+		/*
+		 * It does not matter if the bit is set again here, because in
+		 * that case the count is always > 1. The same holds for the
+		 * STRIPE_ON_UNPLUG_LIST bit.
+		 */
+		__release_stripe(conf, sh);
+		count++;
+	}
+
+	return count;
+}
+
 static void release_stripe(struct stripe_head *sh)
 {
 	struct r5conf *conf = sh->raid_conf;
 	unsigned long flags;
+	bool wakeup;
 
+	if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+		goto slow_path;
+	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+	if (wakeup)
+		md_wakeup_thread(conf->mddev->thread);
+	return;
+slow_path:
 	local_irq_save(flags);
+	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
 	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
 		do_release_stripe(conf, sh);
 		spin_unlock(&conf->device_lock);
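The released_stripes path is a lock-free handoff: producers push with llist_add(), which builds a LIFO chain, and the single consumer drains everything with llist_del_all() and then restores submission order with the llist_reverse_order() helper added above. The same producer/consumer shape, as a hedged sketch:

#include <linux/llist.h>

struct item {
	struct llist_node node;
};

static void consume(struct item *it);	/* hypothetical per-item work */

/* Producer: lock-free, callable from any context. Returns true when
 * the list was empty, i.e. the consumer may need a wakeup. */
static bool item_release(struct llist_head *list, struct item *it)
{
	return llist_add(&it->node, list);
}

/* Single consumer: drain once, reverse to FIFO order, then process. */
static void item_drain(struct llist_head *list)
{
	struct llist_node *head = llist_reverse_order(llist_del_all(list));

	while (head) {
		struct item *it = llist_entry(head, struct item, node);

		head = llist_next(head);
		consume(it);
	}
}
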
@@ -359,6 +463,7 @@
 		raid5_build_block(sh, i, previous);
 	}
 	insert_hash(conf, sh);
+	sh->cpu = smp_processor_id();
 }
 
 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -491,7 +596,8 @@
 			if (atomic_read(&sh->count)) {
 				BUG_ON(!list_empty(&sh->lru)
 				    && !test_bit(STRIPE_EXPANDING, &sh->state)
-				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
+				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
+				    && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
@@ -499,6 +605,10 @@
 				    !test_bit(STRIPE_EXPANDING, &sh->state))
 					BUG();
 				list_del_init(&sh->lru);
+				if (sh->group) {
+					sh->group->stripes_cnt--;
+					sh->group = NULL;
+				}
 			}
 		}
 	} while (sh == NULL);
@@ -3779,6 +3889,7 @@
 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
 			list_add_tail(&sh->lru, &conf->hold_list);
+			raid5_wakeup_stripe_thread(sh);
 		}
 	}
 }
@@ -4058,18 +4169,35 @@
  * head of the hold_list has changed, i.e. the head was promoted to the
  * handle_list.
  */
-static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
+static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
 {
-	struct stripe_head *sh;
+	struct stripe_head *sh = NULL, *tmp;
+	struct list_head *handle_list = NULL;
+	struct r5worker_group *wg = NULL;
+
+	if (conf->worker_cnt_per_group == 0) {
+		handle_list = &conf->handle_list;
+	} else if (group != ANY_GROUP) {
+		handle_list = &conf->worker_groups[group].handle_list;
+		wg = &conf->worker_groups[group];
+	} else {
+		int i;
+		for (i = 0; i < conf->group_cnt; i++) {
+			handle_list = &conf->worker_groups[i].handle_list;
+			wg = &conf->worker_groups[i];
+			if (!list_empty(handle_list))
+				break;
+		}
+	}
 
 	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
 		  __func__,
-		  list_empty(&conf->handle_list) ? "empty" : "busy",
+		  list_empty(handle_list) ? "empty" : "busy",
 		  list_empty(&conf->hold_list) ? "empty" : "busy",
 		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
 
-	if (!list_empty(&conf->handle_list)) {
-		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+	if (!list_empty(handle_list)) {
+		sh = list_entry(handle_list->next, typeof(*sh), lru);
 
 		if (list_empty(&conf->hold_list))
 			conf->bypass_count = 0;
@@ -4087,14 +4215,32 @@
 		   ((conf->bypass_threshold &&
 		     conf->bypass_count > conf->bypass_threshold) ||
 		    atomic_read(&conf->pending_full_writes) == 0)) {
-		sh = list_entry(conf->hold_list.next,
-				typeof(*sh), lru);
-		conf->bypass_count -= conf->bypass_threshold;
-		if (conf->bypass_count < 0)
-			conf->bypass_count = 0;
-	} else
+
+		list_for_each_entry(tmp, &conf->hold_list,  lru) {
+			if (conf->worker_cnt_per_group == 0 ||
+			    group == ANY_GROUP ||
+			    !cpu_online(tmp->cpu) ||
+			    cpu_to_group(tmp->cpu) == group) {
+				sh = tmp;
+				break;
+			}
+		}
+
+		if (sh) {
+			conf->bypass_count -= conf->bypass_threshold;
+			if (conf->bypass_count < 0)
+				conf->bypass_count = 0;
+		}
+		wg = NULL;
+	}
+
+	if (!sh)
 		return NULL;
 
+	if (wg) {
+		wg->stripes_cnt--;
+		sh->group = NULL;
+	}
 	list_del_init(&sh->lru);
 	atomic_inc(&sh->count);
 	BUG_ON(atomic_read(&sh->count) != 1);
@@ -4127,6 +4273,10 @@
 			 */
 			smp_mb__before_clear_bit();
 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+			/*
+			 * STRIPE_ON_RELEASE_LIST could be set here. In that
+			 * case, the count is always > 1 here
+			 */
 			__release_stripe(conf, sh);
 			cnt++;
 		}
@@ -4286,8 +4436,10 @@
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int previous;
+		int seq;
 
 	retry:
+		seq = read_seqcount_begin(&conf->gen_lock);
 		previous = 0;
 		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 		if (unlikely(conf->reshape_progress != MaxSector)) {
@@ -4320,7 +4472,7 @@
 						  previous,
 						  &dd_idx, NULL);
 		pr_debug("raid456: make_request, sector %llu logical %llu\n",
-			(unsigned long long)new_sector, 
+			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
 
 		sh = get_active_stripe(conf, new_sector, previous,
@@ -4349,6 +4501,13 @@
 					goto retry;
 				}
 			}
+			if (read_seqcount_retry(&conf->gen_lock, seq)) {
+				/* Might have got the wrong stripe_head
+				 * by accident
+				 */
+				release_stripe(sh);
+				goto retry;
+			}
 
 			if (rw == WRITE &&
 			    logical_sector >= mddev->suspend_lo &&
@@ -4788,14 +4947,14 @@
 	return handled;
 }
 
-#define MAX_STRIPE_BATCH 8
-static int handle_active_stripes(struct r5conf *conf)
+static int handle_active_stripes(struct r5conf *conf, int group,
+				 struct r5worker *worker)
 {
 	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
 	int i, batch_size = 0;
 
 	while (batch_size < MAX_STRIPE_BATCH &&
-			(sh = __get_priority_stripe(conf)) != NULL)
+			(sh = __get_priority_stripe(conf, group)) != NULL)
 		batch[batch_size++] = sh;
 
 	if (batch_size == 0)
@@ -4813,6 +4972,39 @@
 	return batch_size;
 }
 
+static void raid5_do_work(struct work_struct *work)
+{
+	struct r5worker *worker = container_of(work, struct r5worker, work);
+	struct r5worker_group *group = worker->group;
+	struct r5conf *conf = group->conf;
+	int group_id = group - conf->worker_groups;
+	int handled;
+	struct blk_plug plug;
+
+	pr_debug("+++ raid5worker active\n");
+
+	blk_start_plug(&plug);
+	handled = 0;
+	spin_lock_irq(&conf->device_lock);
+	while (1) {
+		int batch_size, released;
+
+		released = release_stripe_list(conf);
+
+		batch_size = handle_active_stripes(conf, group_id, worker);
+		worker->working = false;
+		if (!batch_size && !released)
+			break;
+		handled += batch_size;
+	}
+	pr_debug("%d stripes handled\n", handled);
+
+	spin_unlock_irq(&conf->device_lock);
+	blk_finish_plug(&plug);
+
+	pr_debug("--- raid5worker inactive\n");
+}
+
 /*
  * This is our raid5 kernel thread.
  *
@@ -4836,7 +5028,9 @@
 	spin_lock_irq(&conf->device_lock);
 	while (1) {
 		struct bio *bio;
-		int batch_size;
+		int batch_size, released;
+
+		released = release_stripe_list(conf);
 
 		if (
 		    !list_empty(&conf->bitmap_list)) {
@@ -4860,8 +5054,8 @@
 			handled++;
 		}
 
-		batch_size = handle_active_stripes(conf);
-		if (!batch_size)
+		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+		if (!batch_size && !released)
 			break;
 		handled += batch_size;
 
@@ -4989,10 +5183,70 @@
 static struct md_sysfs_entry
 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
 
+static ssize_t
+raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+	if (conf)
+		return sprintf(page, "%d\n", conf->worker_cnt_per_group);
+	else
+		return 0;
+}
+
+static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static ssize_t
+raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+{
+	struct r5conf *conf = mddev->private;
+	unsigned long new;
+	int err;
+	struct r5worker_group *old_groups;
+	int old_group_cnt;
+
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+	if (!conf)
+		return -ENODEV;
+
+	if (kstrtoul(page, 10, &new))
+		return -EINVAL;
+
+	if (new == conf->worker_cnt_per_group)
+		return len;
+
+	mddev_suspend(mddev);
+
+	old_groups = conf->worker_groups;
+	old_group_cnt = conf->worker_cnt_per_group;
+
+	conf->worker_groups = NULL;
+	err = alloc_thread_groups(conf, new);
+	if (err) {
+		conf->worker_groups = old_groups;
+		conf->worker_cnt_per_group = old_group_cnt;
+	} else {
+		if (old_groups)
+			kfree(old_groups[0].workers);
+		kfree(old_groups);
+	}
+
+	mddev_resume(mddev);
+
+	if (err)
+		return err;
+	return len;
+}
+
+static struct md_sysfs_entry
+raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
+				raid5_show_group_thread_cnt,
+				raid5_store_group_thread_cnt);
+
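raid5_store_group_thread_cnt follows a suspend/swap/rollback shape: quiesce the array, detach the old worker groups, attempt to build the new ones, and reinstall the saved pointers if allocation fails. The same shape as a generic hedged sketch (every name here is hypothetical):

struct groups_like;

struct conf_like {
	struct groups_like *groups;
	int cnt;
};

static int alloc_groups(struct conf_like *c, int cnt);	/* hypothetical */
static void free_groups(struct groups_like *g);
static void quiesce(struct conf_like *c);	/* like mddev_suspend() */
static void unquiesce(struct conf_like *c);	/* like mddev_resume() */

static int swap_config(struct conf_like *c, int new_cnt)
{
	struct groups_like *old = c->groups;
	int old_cnt = c->cnt;
	int err;

	quiesce(c);
	c->groups = NULL;
	err = alloc_groups(c, new_cnt);
	if (err) {
		c->groups = old;	/* rollback */
		c->cnt = old_cnt;
	} else {
		free_groups(old);
	}
	unquiesce(c);
	return err;
}
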
 static struct attribute *raid5_attrs[] =  {
 	&raid5_stripecache_size.attr,
 	&raid5_stripecache_active.attr,
 	&raid5_preread_bypass_threshold.attr,
+	&raid5_group_thread_cnt.attr,
 	NULL,
 };
 static struct attribute_group raid5_attrs_group = {
@@ -5000,6 +5254,54 @@
 	.attrs = raid5_attrs,
 };
 
+static int alloc_thread_groups(struct r5conf *conf, int cnt)
+{
+	int i, j;
+	ssize_t size;
+	struct r5worker *workers;
+
+	conf->worker_cnt_per_group = cnt;
+	if (cnt == 0) {
+		conf->worker_groups = NULL;
+		return 0;
+	}
+	conf->group_cnt = num_possible_nodes();
+	size = sizeof(struct r5worker) * cnt;
+	workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
+	conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
+				conf->group_cnt, GFP_NOIO);
+	if (!conf->worker_groups || !workers) {
+		kfree(workers);
+		kfree(conf->worker_groups);
+		conf->worker_groups = NULL;
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < conf->group_cnt; i++) {
+		struct r5worker_group *group;
+
+		group = &conf->worker_groups[i];
+		INIT_LIST_HEAD(&group->handle_list);
+		group->conf = conf;
+		group->workers = workers + i * cnt;
+
+		for (j = 0; j < cnt; j++) {
+			group->workers[j].group = group;
+			INIT_WORK(&group->workers[j].work, raid5_do_work);
+		}
+	}
+
+	return 0;
+}
+
+static void free_thread_groups(struct r5conf *conf)
+{
+	if (conf->worker_groups)
+		kfree(conf->worker_groups[0].workers);
+	kfree(conf->worker_groups);
+	conf->worker_groups = NULL;
+}
+
 static sector_t
 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
@@ -5040,6 +5342,7 @@
 
 static void free_conf(struct r5conf *conf)
 {
+	free_thread_groups(conf);
 	shrink_stripes(conf);
 	raid5_free_percpu(conf);
 	kfree(conf->disks);
@@ -5168,7 +5471,11 @@
 	conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
 	if (conf == NULL)
 		goto abort;
+	/* Don't enable multi-threading by default */
+	if (alloc_thread_groups(conf, 0))
+		goto abort;
 	spin_lock_init(&conf->device_lock);
+	seqcount_init(&conf->gen_lock);
 	init_waitqueue_head(&conf->wait_for_stripe);
 	init_waitqueue_head(&conf->wait_for_overlap);
 	INIT_LIST_HEAD(&conf->handle_list);
@@ -5176,6 +5483,7 @@
 	INIT_LIST_HEAD(&conf->delayed_list);
 	INIT_LIST_HEAD(&conf->bitmap_list);
 	INIT_LIST_HEAD(&conf->inactive_list);
+	init_llist_head(&conf->released_stripes);
 	atomic_set(&conf->active_stripes, 0);
 	atomic_set(&conf->preread_active_stripes, 0);
 	atomic_set(&conf->active_aligned_reads, 0);
@@ -5980,6 +6288,7 @@
 
 	atomic_set(&conf->reshape_stripes, 0);
 	spin_lock_irq(&conf->device_lock);
+	write_seqcount_begin(&conf->gen_lock);
 	conf->previous_raid_disks = conf->raid_disks;
 	conf->raid_disks += mddev->delta_disks;
 	conf->prev_chunk_sectors = conf->chunk_sectors;
@@ -5996,8 +6305,16 @@
 	else
 		conf->reshape_progress = 0;
 	conf->reshape_safe = conf->reshape_progress;
+	write_seqcount_end(&conf->gen_lock);
 	spin_unlock_irq(&conf->device_lock);
 
+	/* Now make sure any requests that proceeded on the assumption
+	 * the reshape wasn't running - like Discard or Read - have
+	 * completed.
+	 */
+	mddev_suspend(mddev);
+	mddev_resume(mddev);
+
 	/* Add some new drives, as many as will fit.
 	 * We know there are enough to make the newly sized array work.
 	 * Don't add devices if we are reducing the number of
@@ -6472,6 +6789,10 @@
 
 static int __init raid5_init(void)
 {
+	raid5_wq = alloc_workqueue("raid5wq",
+		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
+	if (!raid5_wq)
+		return -ENOMEM;
 	register_md_personality(&raid6_personality);
 	register_md_personality(&raid5_personality);
 	register_md_personality(&raid4_personality);
@@ -6483,6 +6804,7 @@
 	unregister_md_personality(&raid6_personality);
 	unregister_md_personality(&raid5_personality);
 	unregister_md_personality(&raid4_personality);
+	destroy_workqueue(raid5_wq);
 }
 
 module_init(raid5_init);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 70c4932..2113ffa 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -197,6 +197,7 @@
 struct stripe_head {
 	struct hlist_node	hash;
 	struct list_head	lru;	      /* inactive_list or handle_list */
+	struct llist_node	release_list;
 	struct r5conf		*raid_conf;
 	short			generation;	/* increments with every
 						 * reshape */
@@ -211,6 +212,8 @@
 	enum check_states	check_state;
 	enum reconstruct_states reconstruct_state;
 	spinlock_t		stripe_lock;
+	int			cpu;
+	struct r5worker_group	*group;
 	/**
 	 * struct stripe_operations
 	 * @target - STRIPE_OP_COMPUTE_BLK target
@@ -321,6 +324,7 @@
 	STRIPE_OPS_REQ_PENDING,
 	STRIPE_ON_UNPLUG_LIST,
 	STRIPE_DISCARD,
+	STRIPE_ON_RELEASE_LIST,
 };
 
 /*
@@ -363,6 +367,19 @@
 	struct md_rdev	*rdev, *replacement;
 };
 
+struct r5worker {
+	struct work_struct work;
+	struct r5worker_group *group;
+	bool working;
+};
+
+struct r5worker_group {
+	struct list_head handle_list;
+	struct r5conf *conf;
+	struct r5worker *workers;
+	int stripes_cnt;
+};
+
 struct r5conf {
 	struct hlist_head	*stripe_hashtbl;
 	struct mddev		*mddev;
@@ -386,6 +403,7 @@
 	int			prev_chunk_sectors;
 	int			prev_algo;
 	short			generation; /* increments with every reshape */
+	seqcount_t		gen_lock;	/* lock against generation changes */
 	unsigned long		reshape_checkpoint; /* Time we last updated
 						     * metadata */
 	long long		min_offset_diff; /* minimum difference between
@@ -445,6 +463,7 @@
 	 */
 	atomic_t		active_stripes;
 	struct list_head	inactive_list;
+	struct llist_head	released_stripes;
 	wait_queue_head_t	wait_for_stripe;
 	wait_queue_head_t	wait_for_overlap;
 	int			inactive_blocked;	/* release of inactive stripes blocked,
@@ -458,6 +477,9 @@
 	 * the new thread here until we fully activate the array.
 	 */
 	struct md_thread	*thread;
+	struct r5worker_group	*worker_groups;
+	int			group_cnt;
+	int			worker_cnt_per_group;
 };
 
 /*
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 95f1814..1d38949 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -24,3 +24,15 @@
 	  support. This provides a block device driver, which you can use
 	  to mount the filesystem. Almost everyone wishing MemoryStick
 	  support should say Y or M here.
+
+config MS_BLOCK
+	tristate "MemoryStick Standard device driver"
+	depends on BLOCK
+	help
+	  Say Y here to enable the MemoryStick Standard device driver
+	  support. This provides a block device driver, which you can use
+	  to mount the filesystem.
+	  This driver works with the old (bulky) MemoryStick and MemoryStick
+	  Duo, but not with MemoryStick PRO. Say Y if you have such a card.
+	  The driver is new and not yet well tested; it may damage your card
+	  (even permanently).
diff --git a/drivers/memstick/core/Makefile b/drivers/memstick/core/Makefile
index ecd0299..0d7f90c 100644
--- a/drivers/memstick/core/Makefile
+++ b/drivers/memstick/core/Makefile
@@ -3,5 +3,5 @@
 #
 
 obj-$(CONFIG_MEMSTICK)		+= memstick.o
-
+obj-$(CONFIG_MS_BLOCK)		+= ms_block.o
 obj-$(CONFIG_MSPRO_BLOCK)	+= mspro_block.o
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
new file mode 100644
index 0000000..08e7023
--- /dev/null
+++ b/drivers/memstick/core/ms_block.c
@@ -0,0 +1,2385 @@
+/*
+ *  ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver were copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ */
+#define DRIVER_NAME "ms_block"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include "ms_block.h"
+
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+/*
+ * Copies a section of 'sg_from', starting at offset 'offset' and of length
+ * 'len', to another scatterlist of up to 'to_nents' entries
+ */
+static size_t msb_sg_copy(struct scatterlist *sg_from,
+	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
+{
+	size_t copied = 0;
+
+	while (offset > 0) {
+		if (offset >= sg_from->length) {
+			if (sg_is_last(sg_from))
+				return 0;
+
+			offset -= sg_from->length;
+			sg_from = sg_next(sg_from);
+			continue;
+		}
+
+		copied = min(len, sg_from->length - offset);
+		sg_set_page(sg_to, sg_page(sg_from),
+			copied, sg_from->offset + offset);
+
+		len -= copied;
+		offset = 0;
+
+		if (sg_is_last(sg_from) || !len)
+			goto out;
+
+		sg_to = sg_next(sg_to);
+		to_nents--;
+		sg_from = sg_next(sg_from);
+	}
+
+	while (len > sg_from->length && to_nents--) {
+		len -= sg_from->length;
+		copied += sg_from->length;
+
+		sg_set_page(sg_to, sg_page(sg_from),
+				sg_from->length, sg_from->offset);
+
+		if (sg_is_last(sg_from) || !len)
+			goto out;
+
+		sg_from = sg_next(sg_from);
+		sg_to = sg_next(sg_to);
+	}
+
+	if (len && to_nents) {
+		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
+		copied += len;
+	}
+out:
+	sg_mark_end(sg_to);
+	return copied;
+}
+
+/*
+ * Compares a section of 'sg', starting at offset 'offset' and of length
+ * 'len', to a linear buffer of length 'len' at address 'buffer'.
+ * Returns 0 if equal and -1 otherwise
+ */
+static int msb_sg_compare_to_buffer(struct scatterlist *sg,
+					size_t offset, u8 *buffer, size_t len)
+{
+	int retval = 0, cmplen;
+	struct sg_mapping_iter miter;
+
+	sg_miter_start(&miter, sg, sg_nents(sg),
+					SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+	while (sg_miter_next(&miter) && len > 0) {
+		if (offset >= miter.length) {
+			offset -= miter.length;
+			continue;
+		}
+
+		cmplen = min(miter.length - offset, len);
+		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
+		if (retval)
+			break;
+
+		buffer += cmplen;
+		len -= cmplen;
+		offset = 0;
+	}
+
+	if (!retval && len)
+		retval = -1;
+
+	sg_miter_stop(&miter);
+	return retval;
+}
+
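sg_miter is the standard way to walk scatterlist memory without manual kmap bookkeeping, and the pattern above (start, next-loop, stop) generalizes to any byte-wise pass. A hedged sketch that byte-sums an sg list the same way:

#include <linux/scatterlist.h>

/* Sketch: byte-sum an sg list with the same miter pattern used by
 * msb_sg_compare_to_buffer() above. */
static u32 sg_byte_sum(struct scatterlist *sg)
{
	struct sg_mapping_iter miter;
	u32 sum = 0;
	size_t i;

	sg_miter_start(&miter, sg, sg_nents(sg),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		const u8 *p = miter.addr;

		for (i = 0; i < miter.length; i++)
			sum += p[i];
	}
	sg_miter_stop(&miter);
	return sum;
}
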
+
+/* Get the zone in which the block with logical address 'lba' lives.
+ * Flash is broken into zones.
+ * Each zone consists of 512 eraseblocks, out of which 494 are used in the
+ * first zone and 496 in all following zones.
+ * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
+ */
+static int msb_get_zone_from_lba(int lba)
+{
+	if (lba < 494)
+		return 0;
+	return ((lba - 494) / 496) + 1;
+}
+
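A quick worked check of the zone arithmetic, as a standalone program that duplicates the helper above: lba 493 is the last block of zone 0, 494 the first of zone 1, 989 the last of zone 1, and 990 the first of zone 2.

#include <assert.h>

static int get_zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}

int main(void)
{
	assert(get_zone_from_lba(493) == 0);	/* last block of zone 0 */
	assert(get_zone_from_lba(494) == 1);	/* first block of zone 1 */
	assert(get_zone_from_lba(989) == 1);	/* last block of zone 1 */
	assert(get_zone_from_lba(990) == 2);	/* first block of zone 2 */
	return 0;
}
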
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+	return pba / MS_BLOCKS_IN_ZONE;
+}
+
+/* Debug test to validate free block counts */
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+	int total_free_blocks = 0;
+	int i;
+
+	if (!debug)
+		return 0;
+
+	for (i = 0; i < msb->zone_count; i++)
+		total_free_blocks += msb->free_block_count[i];
+
+	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+					msb->block_count) == total_free_blocks)
+		return 0;
+
+	pr_err("BUG: free block counts don't match the bitmap");
+	msb->read_only = true;
+	return -EINVAL;
+}
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+	int zone = msb_get_zone_from_pba(pba);
+
+	if (test_bit(pba, msb->used_blocks_bitmap)) {
+		pr_err("BUG: attempt to mark already used pba %d as used",
+		       pba);
+		msb->read_only = true;
+		return;
+	}
+
+	if (msb_validate_used_block_bitmap(msb))
+		return;
+
+	/* No races because all IO is single threaded */
+	__set_bit(pba, msb->used_blocks_bitmap);
+	msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+	int zone = msb_get_zone_from_pba(pba);
+
+	if (!test_bit(pba, msb->used_blocks_bitmap)) {
+		pr_err("BUG: attempt to mark already unused pba %d as unused",
+		       pba);
+		msb->read_only = true;
+		return;
+	}
+
+	if (msb_validate_used_block_bitmap(msb))
+		return;
+
+	/* No races because all IO is single threaded */
+	__clear_bit(pba, msb->used_blocks_bitmap);
+	msb->free_block_count[zone]++;
+}
+
+/* Invalidate current register window */
+static void msb_invalidate_reg_window(struct msb_data *msb)
+{
+	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
+	msb->reg_addr.w_length = sizeof(struct ms_id_register);
+	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
+	msb->reg_addr.r_length = sizeof(struct ms_id_register);
+	msb->addr_valid = false;
+}
+
+/* Start a state machine */
+static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
+		(struct memstick_dev *card, struct memstick_request **req))
+{
+	struct memstick_dev *card = msb->card;
+
+	WARN_ON(msb->state != -1);
+	msb->int_polling = false;
+	msb->state = 0;
+	msb->exit_error = 0;
+
+	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
+
+	card->next_request = state_func;
+	memstick_new_req(card->host);
+	wait_for_completion(&card->mrq_complete);
+
+	WARN_ON(msb->state != -1);
+	return msb->exit_error;
+}
+
+/* State machines call this to exit */
+static int msb_exit_state_machine(struct msb_data *msb, int error)
+{
+	WARN_ON(msb->state == -1);
+
+	msb->state = -1;
+	msb->exit_error = error;
+	msb->card->next_request = h_msb_default_bad;
+
+	/* Invalidate reg window on errors */
+	if (error)
+		msb_invalidate_reg_window(msb);
+
+	complete(&msb->card->mrq_complete);
+	return -ENXIO;
+}
+
+/* read INT register */
+static int msb_read_int_reg(struct msb_data *msb, long timeout)
+{
+	struct memstick_request *mrq = &msb->card->current_mrq;
+
+	WARN_ON(msb->state == -1);
+
+	if (!msb->int_polling) {
+		msb->int_timeout = jiffies +
+			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
+		msb->int_polling = true;
+	} else if (time_after(jiffies, msb->int_timeout)) {
+		mrq->data[0] = MEMSTICK_INT_CMDNAK;
+		return 0;
+	}
+
+	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
+				mrq->need_card_int && !mrq->error) {
+		mrq->data[0] = mrq->int_reg;
+		mrq->need_card_int = false;
+		return 0;
+	} else {
+		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+		return 1;
+	}
+}
+
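The timeout handling in msb_read_int_reg() is the standard jiffies idiom: arm a deadline with msecs_to_jiffies() on the first poll and compare with the overflow-safe time_after() on later ones. A hedged minimal sketch of the same idiom:

#include <linux/jiffies.h>

/* Sketch: true while polling may continue, false once the deadline
 * (armed on the first call) has passed. */
static bool poll_window_open(unsigned long *deadline, bool *armed, long ms)
{
	if (!*armed) {
		*deadline = jiffies + msecs_to_jiffies(ms);
		*armed = true;
		return true;
	}
	return !time_after(jiffies, *deadline);
}
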
+/* Read a register */
+static int msb_read_regs(struct msb_data *msb, int offset, int len)
+{
+	struct memstick_request *req = &msb->card->current_mrq;
+
+	if (msb->reg_addr.r_offset != offset ||
+	    msb->reg_addr.r_length != len || !msb->addr_valid) {
+
+		msb->reg_addr.r_offset = offset;
+		msb->reg_addr.r_length = len;
+		msb->addr_valid = true;
+
+		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+			&msb->reg_addr, sizeof(msb->reg_addr));
+		return 0;
+	}
+
+	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
+	return 1;
+}
+
+/* Write a card register */
+static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
+{
+	struct memstick_request *req = &msb->card->current_mrq;
+
+	if (msb->reg_addr.w_offset != offset ||
+		msb->reg_addr.w_length != len  || !msb->addr_valid) {
+
+		msb->reg_addr.w_offset = offset;
+		msb->reg_addr.w_length = len;
+		msb->addr_valid = true;
+
+		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+			&msb->reg_addr, sizeof(msb->reg_addr));
+		return 0;
+	}
+
+	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
+	return 1;
+}
+
+/* Handler for absence of IO */
+static int h_msb_default_bad(struct memstick_dev *card,
+						struct memstick_request **mrq)
+{
+	return -ENXIO;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * It writes its output to msb->current_sg and takes the sector address
+ * from msb->regs.param. It can also be used to read extra data only;
+ * set the parameters accordingly.
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	struct scatterlist sg[2];
+	u8 command, intreg;
+
+	if (mrq->error) {
+		dbg("read_page, unknown error");
+		return msb_exit_state_machine(msb, mrq->error);
+	}
+again:
+	switch (msb->state) {
+	case MSB_RP_SEND_BLOCK_ADDRESS:
+		/* msb_write_regs sometimes "fails" because it needs to update
+			the reg window first, and thus returns a request for
+			that. We then stay in this state and retry */
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			sizeof(struct ms_param_register),
+			(unsigned char *)&msb->regs.param))
+			return 0;
+
+		msb->state = MSB_RP_SEND_READ_COMMAND;
+		return 0;
+
+	case MSB_RP_SEND_READ_COMMAND:
+		command = MS_CMD_BLOCK_READ;
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+		msb->state = MSB_RP_SEND_INT_REQ;
+		return 0;
+
+	case MSB_RP_SEND_INT_REQ:
+		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
+		/* If we don't actually need to send the INT read request
+			(needed only in serial mode), just fall through */
+		if (msb_read_int_reg(msb, -1))
+			return 0;
+		/* fallthrough */
+
+	case MSB_RP_RECEIVE_INT_REQ_RESULT:
+		intreg = mrq->data[0];
+		msb->regs.status.interrupt = intreg;
+
+		if (intreg & MEMSTICK_INT_CMDNAK)
+			return msb_exit_state_machine(msb, -EIO);
+
+		if (!(intreg & MEMSTICK_INT_CED)) {
+			msb->state = MSB_RP_SEND_INT_REQ;
+			goto again;
+		}
+
+		msb->int_polling = false;
+		msb->state = (intreg & MEMSTICK_INT_ERR) ?
+			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
+		goto again;
+
+	case MSB_RP_SEND_READ_STATUS_REG:
+		/* Read the status register to understand the source of INT_ERR */
+		if (!msb_read_regs(msb,
+			offsetof(struct ms_register, status),
+			sizeof(struct ms_status_register)))
+			return 0;
+
+		msb->state = MSB_RP_RECEIVE_STATUS_REG;
+		return 0;
+
+	case MSB_RP_RECEIVE_STATUS_REG:
+		msb->regs.status = *(struct ms_status_register *)mrq->data;
+		msb->state = MSB_RP_SEND_OOB_READ;
+		/* fallthrough */
+
+	case MSB_RP_SEND_OOB_READ:
+		if (!msb_read_regs(msb,
+			offsetof(struct ms_register, extra_data),
+			sizeof(struct ms_extra_data_register)))
+			return 0;
+
+		msb->state = MSB_RP_RECEIVE_OOB_READ;
+		return 0;
+
+	case MSB_RP_RECEIVE_OOB_READ:
+		msb->regs.extra_data =
+			*(struct ms_extra_data_register *) mrq->data;
+		msb->state = MSB_RP_SEND_READ_DATA;
+		/* fallthrough */
+
+	case MSB_RP_SEND_READ_DATA:
+		/* Skip this state if we only read the OOB */
+		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
+			msb->state = MSB_RP_RECEIVE_READ_DATA;
+			goto again;
+		}
+
+		sg_init_table(sg, ARRAY_SIZE(sg));
+		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+			msb->current_sg_offset,
+			msb->page_size);
+
+		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
+		msb->state = MSB_RP_RECEIVE_READ_DATA;
+		return 0;
+
+	case MSB_RP_RECEIVE_READ_DATA:
+		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
+			msb->current_sg_offset += msb->page_size;
+			return msb_exit_state_machine(msb, 0);
+		}
+
+		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+			dbg("read_page: uncorrectable error");
+			return msb_exit_state_machine(msb, -EBADMSG);
+		}
+
+		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
+			dbg("read_page: correctable error");
+			msb->current_sg_offset += msb->page_size;
+			return msb_exit_state_machine(msb, -EUCLEAN);
+		} else {
+			dbg("read_page: INT error, but no status error bits");
+			return msb_exit_state_machine(msb, -EIO);
+		}
+	}
+
+	BUG();
+}
+
+/*
+ * Handler for writes of exactly one block.
+ * Takes the block address from msb->regs.param and writes the same
+ * extra data, taken from msb->regs.extra_data, to each page.
+ * Returns -EBADMSG if the write fails due to an uncorrectable error,
+ * or -EIO if the device refuses the command or something else goes wrong
+ */
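+/*
+ * Rough state flow (a sketch; error exits omitted): write the param and
+ * extra-data register windows, issue MS_CMD_BLOCK_WRITE, then for each
+ * page wait for BREQ via the INT register and send one page of data,
+ * polling CED instead once the last page has been sent:
+ *   SEND_WRITE_PARAMS -> SEND_WRITE_OOB -> SEND_WRITE_COMMAND ->
+ *   (SEND_INT_REQ -> RECEIVE_INT_REQ -> SEND_WRITE_DATA ->
+ *    RECEIVE_WRITE_CONFIRMATION) x pages_in_block
+ */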
+static int h_msb_write_block(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	struct scatterlist sg[2];
+	u8 intreg, command;
+
+	if (mrq->error)
+		return msb_exit_state_machine(msb, mrq->error);
+
+again:
+	switch (msb->state) {
+
+	/* HACK: JMicron handling of TPCs between 8 and
+	 *	sizeof(memstick_request.data) is broken due to a hardware
+	 *	bug in the PIO mode that is used for these TPCs.
+	 *	Therefore we split the write.
+	 */
+
+	case MSB_WB_SEND_WRITE_PARAMS:
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			sizeof(struct ms_param_register),
+			&msb->regs.param))
+			return 0;
+
+		msb->state = MSB_WB_SEND_WRITE_OOB;
+		return 0;
+
+	case MSB_WB_SEND_WRITE_OOB:
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, extra_data),
+			sizeof(struct ms_extra_data_register),
+			&msb->regs.extra_data))
+			return 0;
+		msb->state = MSB_WB_SEND_WRITE_COMMAND;
+		return 0;
+
+
+	case MSB_WB_SEND_WRITE_COMMAND:
+		command = MS_CMD_BLOCK_WRITE;
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+		msb->state = MSB_WB_SEND_INT_REQ;
+		return 0;
+
+	case MSB_WB_SEND_INT_REQ:
+		msb->state = MSB_WB_RECEIVE_INT_REQ;
+		if (msb_read_int_reg(msb, -1))
+			return 0;
+		/* fallthrough */
+
+	case MSB_WB_RECEIVE_INT_REQ:
+		intreg = mrq->data[0];
+		msb->regs.status.interrupt = intreg;
+
+		/* errors mean out of here, and fast... */
+		if (intreg & (MEMSTICK_INT_CMDNAK))
+			return msb_exit_state_machine(msb, -EIO);
+
+		if (intreg & MEMSTICK_INT_ERR)
+			return msb_exit_state_machine(msb, -EBADMSG);
+
+
+		/* for last page we need to poll CED */
+		if (msb->current_page == msb->pages_in_block) {
+			if (intreg & MEMSTICK_INT_CED)
+				return msb_exit_state_machine(msb, 0);
+			msb->state = MSB_WB_SEND_INT_REQ;
+			goto again;
+
+		}
+
+		/* for non-last page we need BREQ before writing next chunk */
+		if (!(intreg & MEMSTICK_INT_BREQ)) {
+			msb->state = MSB_WB_SEND_INT_REQ;
+			goto again;
+		}
+
+		msb->int_polling = false;
+		msb->state = MSB_WB_SEND_WRITE_DATA;
+		/* fallthrough */
+
+	case MSB_WB_SEND_WRITE_DATA:
+		sg_init_table(sg, ARRAY_SIZE(sg));
+
+		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+			msb->current_sg_offset,
+			msb->page_size) < msb->page_size)
+			return msb_exit_state_machine(msb, -EIO);
+
+		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
+		mrq->need_card_int = 1;
+		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
+		return 0;
+
+	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
+		msb->current_page++;
+		msb->current_sg_offset += msb->page_size;
+		msb->state = MSB_WB_SEND_INT_REQ;
+		goto again;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+/*
+ * This function is a handler for simple IO requests to the device,
+ * consisting of a register write followed by a command
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	u8 intreg;
+
+	if (mrq->error) {
+		dbg("send_command: unknown error");
+		return msb_exit_state_machine(msb, mrq->error);
+	}
+again:
+	switch (msb->state) {
+
+	/* HACK: see h_msb_write_block */
+	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			sizeof(struct ms_param_register),
+			&msb->regs.param))
+			return 0;
+		msb->state = MSB_SC_SEND_WRITE_OOB;
+		return 0;
+
+	case MSB_SC_SEND_WRITE_OOB:
+		if (!msb->command_need_oob) {
+			msb->state = MSB_SC_SEND_COMMAND;
+			goto again;
+		}
+
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, extra_data),
+			sizeof(struct ms_extra_data_register),
+			&msb->regs.extra_data))
+			return 0;
+
+		msb->state = MSB_SC_SEND_COMMAND;
+		return 0;
+
+	case MSB_SC_SEND_COMMAND:
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+		msb->state = MSB_SC_SEND_INT_REQ;
+		return 0;
+
+	case MSB_SC_SEND_INT_REQ:
+		msb->state = MSB_SC_RECEIVE_INT_REQ;
+		if (msb_read_int_reg(msb, -1))
+			return 0;
+		/* fallthrough */
+
+	case MSB_SC_RECEIVE_INT_REQ:
+		intreg = mrq->data[0];
+
+		if (intreg & MEMSTICK_INT_CMDNAK)
+			return msb_exit_state_machine(msb, -EIO);
+		if (intreg & MEMSTICK_INT_ERR)
+			return msb_exit_state_machine(msb, -EBADMSG);
+
+		if (!(intreg & MEMSTICK_INT_CED)) {
+			msb->state = MSB_SC_SEND_INT_REQ;
+			goto again;
+		}
+
+		return msb_exit_state_machine(msb, 0);
+	}
+
+	BUG();
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	u8 command = MS_CMD_RESET;
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+
+	if (mrq->error)
+		return msb_exit_state_machine(msb, mrq->error);
+
+	switch (msb->state) {
+	case MSB_RS_SEND:
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+		mrq->need_card_int = 0;
+		msb->state = MSB_RS_CONFIRM;
+		return 0;
+	case MSB_RS_CONFIRM:
+		return msb_exit_state_machine(msb, 0);
+	}
+	BUG();
+}
+
+/* This handler performs the serial -> parallel interface switch */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	struct memstick_host *host = card->host;
+
+	if (mrq->error) {
+		dbg("parallel_switch: error");
+		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+		return msb_exit_state_machine(msb, mrq->error);
+	}
+
+	switch (msb->state) {
+	case MSB_PS_SEND_SWITCH_COMMAND:
+		/* Set the parallel interface on memstick side */
+		msb->regs.param.system |= MEMSTICK_SYS_PAM;
+
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			1,
+			(unsigned char *)&msb->regs.param))
+			return 0;
+
+		msb->state = MSB_PS_SWITCH_HOST;
+		return 0;
+
+	case MSB_PS_SWITCH_HOST:
+		 /* Set the parallel interface on our side and send a dummy
+			request to see if the card responds */
+		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+		msb->state = MSB_PS_CONFIRM;
+		return 0;
+
+	case MSB_PS_CONFIRM:
+		return msb_exit_state_machine(msb, 0);
+	}
+
+	BUG();
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb, bool full)
+{
+
+	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
+	struct memstick_dev *card = msb->card;
+	struct memstick_host *host = card->host;
+	int error;
+
+	/* Reset the card */
+	msb->regs.param.system = MEMSTICK_SYS_BAMD;
+
+	if (full) {
+		error =  host->set_param(host,
+					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+		if (error)
+			goto out_error;
+
+		msb_invalidate_reg_window(msb);
+
+		error = host->set_param(host,
+					MEMSTICK_POWER, MEMSTICK_POWER_ON);
+		if (error)
+			goto out_error;
+
+		error = host->set_param(host,
+					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+		if (error) {
+out_error:
+			dbg("Failed to reset the host controller");
+			msb->read_only = true;
+			return -EFAULT;
+		}
+	}
+
+	error = msb_run_state_machine(msb, h_msb_reset);
+	if (error) {
+		dbg("Failed to reset the card");
+		msb->read_only = true;
+		return -ENODEV;
+	}
+
+	/* Set parallel mode */
+	if (was_parallel)
+		msb_switch_to_parallel(msb);
+	return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+	int error;
+
+	error = msb_run_state_machine(msb, h_msb_parallel_switch);
+	if (error) {
+		pr_err("Switch to parallel failed");
+		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+		msb_reset(msb, true);
+		return -EFAULT;
+	}
+
+	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+	return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+						u16 pba, u8 page, u8 flag)
+{
+	if (msb->read_only)
+		return -EROFS;
+
+	msb->regs.param.block_address = cpu_to_be16(pba);
+	msb->regs.param.page_address = page;
+	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+	msb->regs.extra_data.overwrite_flag = flag;
+	msb->command_value = MS_CMD_BLOCK_WRITE;
+	msb->command_need_oob = true;
+
+	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+							flag, pba, page);
+	return msb_run_state_machine(msb, h_msb_send_command);
+}
+
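+/*
+ * Flash programming can only clear bits, so a block or a page is marked
+ * bad by rewriting its overwrite flag with the relevant status bit
+ * cleared: 0xFF & ~MEMSTICK_OVERWRITE_BKST below for a whole block, and
+ * ~MEMSTICK_OVERWRITE_PGST0 for a single page.
+ */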
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+	pr_notice("marking pba %d as bad", pba);
+	msb_reset(msb, true);
+	return msb_set_overwrite_flag(
+			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+	dbg("marking page %d of pba %d as bad", page, pba);
+	msb_reset(msb, true);
+	return msb_set_overwrite_flag(msb,
+		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+	int error, try;
+	if (msb->read_only)
+		return -EROFS;
+
+	dbg_verbose("erasing pba %d", pba);
+
+	for (try = 1; try < 3; try++) {
+		msb->regs.param.block_address = cpu_to_be16(pba);
+		msb->regs.param.page_address = 0;
+		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+		msb->command_value = MS_CMD_BLOCK_ERASE;
+		msb->command_need_oob = false;
+
+
+		error = msb_run_state_machine(msb, h_msb_send_command);
+		if (!error || msb_reset(msb, true))
+			break;
+	}
+
+	if (error) {
+		pr_err("erase failed, marking pba %d as bad", pba);
+		msb_mark_bad(msb, pba);
+		return error;
+	}
+
+	dbg_verbose("erase success, marking pba %d as unused", pba);
+	msb_mark_block_unused(msb, pba);
+	__set_bit(pba, msb->erased_blocks_bitmap);
+	return 0;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+	u16 pba, u8 page, struct ms_extra_data_register *extra,
+					struct scatterlist *sg,  int offset)
+{
+	int try, error;
+
+	if (pba == MS_BLOCK_INVALID) {
+		unsigned long flags;
+		struct sg_mapping_iter miter;
+		size_t len = msb->page_size;
+
+		dbg_verbose("read unmapped sector. returning 0xFF");
+
+		local_irq_save(flags);
+		sg_miter_start(&miter, sg, sg_nents(sg),
+				SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+		while (sg_miter_next(&miter) && len > 0) {
+
+			int chunklen;
+
+			if (offset && offset >= miter.length) {
+				offset -= miter.length;
+				continue;
+			}
+
+			chunklen = min(miter.length - offset, len);
+			memset(miter.addr + offset, 0xFF, chunklen);
+			len -= chunklen;
+			offset = 0;
+		}
+
+		sg_miter_stop(&miter);
+		local_irq_restore(flags);
+
+		if (offset)
+			return -EFAULT;
+
+		if (extra)
+			memset(extra, 0xFF, sizeof(*extra));
+		return 0;
+	}
+
+	if (pba >= msb->block_count) {
+		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
+		return -EINVAL;
+	}
+
+	for (try = 1; try < 3; try++) {
+		msb->regs.param.block_address = cpu_to_be16(pba);
+		msb->regs.param.page_address = page;
+		msb->regs.param.cp = MEMSTICK_CP_PAGE;
+
+		msb->current_sg = sg;
+		msb->current_sg_offset = offset;
+		error = msb_run_state_machine(msb, h_msb_read_page);
+
+
+		if (error == -EUCLEAN) {
+			pr_notice("correctable error on pba %d, page %d",
+				pba, page);
+			error = 0;
+		}
+
+		if (!error && extra)
+			*extra = msb->regs.extra_data;
+
+		if (!error || msb_reset(msb, true))
+			break;
+
+	}
+
+	/* Mark bad pages */
+	if (error == -EBADMSG) {
+		pr_err("uncorrectable error on read of pba %d, page %d",
+			pba, page);
+
+		if (msb->regs.extra_data.overwrite_flag &
+					MEMSTICK_OVERWRITE_PGST0)
+			msb_mark_page_bad(msb, pba, page);
+		return -EBADMSG;
+	}
+
+	if (error)
+		pr_err("read of pba %d, page %d failed with error %d",
+			pba, page, error);
+	return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+	struct ms_extra_data_register *extra)
+{
+	int error;
+
+	BUG_ON(!extra);
+	msb->regs.param.block_address = cpu_to_be16(pba);
+	msb->regs.param.page_address = page;
+	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
+
+	if (pba >= msb->block_count) {
+		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
+		return -EINVAL;
+	}
+
+	error = msb_run_state_machine(msb, h_msb_read_page);
+	*extra = msb->regs.extra_data;
+
+	if (error == -EUCLEAN) {
+		pr_notice("correctable error on pba %d, page %d",
+			pba, page);
+		return 0;
+	}
+
+	return error;
+}
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+				struct scatterlist *orig_sg,  int offset)
+{
+	struct scatterlist sg;
+	int page = 0, error;
+
+	sg_init_one(&sg, msb->block_buffer, msb->block_size);
+
+	while (page < msb->pages_in_block) {
+
+		error = msb_read_page(msb, pba, page,
+				NULL, &sg, page * msb->page_size);
+		if (error)
+			return error;
+		page++;
+	}
+
+	if (msb_sg_compare_to_buffer(orig_sg, offset,
+				msb->block_buffer, msb->block_size))
+		return -EIO;
+	return 0;
+}
+
+/* Writes exactly one block + oob */
+static int msb_write_block(struct msb_data *msb,
+			u16 pba, u32 lba, struct scatterlist *sg, int offset)
+{
+	int error, current_try = 1;
+	BUG_ON(sg->length < msb->page_size);
+
+	if (msb->read_only)
+		return -EROFS;
+
+	if (pba == MS_BLOCK_INVALID) {
+		pr_err(
+			"BUG: write: attempt to write MS_BLOCK_INVALID block");
+		return -EINVAL;
+	}
+
+	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+		pr_err(
+		"BUG: write: attempt to write beyond the end of device");
+		return -EINVAL;
+	}
+
+	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+		pr_err("BUG: write: lba zone mismatch");
+		return -EINVAL;
+	}
+
+	if (pba == msb->boot_block_locations[0] ||
+		pba == msb->boot_block_locations[1]) {
+		pr_err("BUG: write: attempt to write to boot blocks!");
+		return -EINVAL;
+	}
+
+	while (1) {
+
+		if (msb->read_only)
+			return -EROFS;
+
+		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+		msb->regs.param.page_address = 0;
+		msb->regs.param.block_address = cpu_to_be16(pba);
+
+		msb->regs.extra_data.management_flag = 0xFF;
+		msb->regs.extra_data.overwrite_flag = 0xF8;
+		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
+
+		msb->current_sg = sg;
+		msb->current_sg_offset = offset;
+		msb->current_page = 0;
+
+		error = msb_run_state_machine(msb, h_msb_write_block);
+
+		/* The block we just wrote to is assumed erased, since its pba
+			was erased beforehand. If it actually wasn't, the write
+			still "succeeds" but merely clears bits that were
+			already set in the block, so read back and verify that
+			what we wrote matches what we expect. Blocks that we
+			erased ourselves are trusted. */
+		if (!error && (verify_writes ||
+				!test_bit(pba, msb->erased_blocks_bitmap)))
+			error = msb_verify_block(msb, pba, sg, offset);
+
+		if (!error)
+			break;
+
+		if (current_try > 1 || msb_reset(msb, true))
+			break;
+
+		pr_err("write failed, trying to erase the pba %d", pba);
+		error = msb_erase_block(msb, pba);
+		if (error)
+			break;
+
+		current_try++;
+	}
+	return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+	u16 pos;
+	int pba = zone * MS_BLOCKS_IN_ZONE;
+	int i;
+
+	get_random_bytes(&pos, sizeof(pos));
+
+	if (!msb->free_block_count[zone]) {
+		pr_err("no free blocks left in zone %d to use for a write (media is worn out), switching to RO mode", zone);
+		msb->read_only = true;
+		return MS_BLOCK_INVALID;
+	}
+
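+	/*
+	 * Reduce the random value modulo the number of free blocks in this
+	 * zone, then walk to the pos-th zero bit in the zone's slice of the
+	 * used-blocks bitmap; randomizing the pick spreads writes across the
+	 * zone (a simple form of wear leveling).
+	 */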
+	pos %= msb->free_block_count[zone];
+
+	dbg_verbose("have %d choices for a free block, selected randomly: %d",
+		msb->free_block_count[zone], pos);
+
+	pba = find_next_zero_bit(msb->used_blocks_bitmap,
+							msb->block_count, pba);
+	for (i = 0; i < pos; ++i)
+		pba = find_next_zero_bit(msb->used_blocks_bitmap,
+						msb->block_count, pba + 1);
+
+	dbg_verbose("result of the free blocks scan: pba %d", pba);
+
+	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+		pr_err("BUG: can't get a free block");
+		msb->read_only = true;
+		return MS_BLOCK_INVALID;
+	}
+
+	msb_mark_block_used(msb, pba);
+	return pba;
+}
+
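+/*
+ * Replaces the contents of a logical block: the old pba (if any) gets its
+ * update-status bit cleared, the new data is written to a freshly
+ * allocated pba in the same zone, the old pba is erased and the
+ * lba -> pba mapping is switched over.
+ */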
+static int msb_update_block(struct msb_data *msb, u16 lba,
+	struct scatterlist *sg, int offset)
+{
+	u16 pba, new_pba;
+	int error, try;
+
+	pba = msb->lba_to_pba_table[lba];
+	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
+
+	if (pba != MS_BLOCK_INVALID) {
+		dbg_verbose("setting the update flag on the block");
+		msb_set_overwrite_flag(msb, pba, 0,
+				0xFF & ~MEMSTICK_OVERWRITE_UDST);
+	}
+
+	for (try = 0; try < 3; try++) {
+		new_pba = msb_get_free_block(msb,
+			msb_get_zone_from_lba(lba));
+
+		if (new_pba == MS_BLOCK_INVALID) {
+			error = -EIO;
+			goto out;
+		}
+
+		dbg_verbose("block update: writing updated block to the pba %d",
+								new_pba);
+		error = msb_write_block(msb, new_pba, lba, sg, offset);
+		if (error == -EBADMSG) {
+			msb_mark_bad(msb, new_pba);
+			continue;
+		}
+
+		if (error)
+			goto out;
+
+		dbg_verbose("block update: erasing the old block");
+		msb_erase_block(msb, pba);
+		msb->lba_to_pba_table[lba] = new_pba;
+		return 0;
+	}
+out:
+	if (error) {
+		pr_err("block update error after %d tries, switching to r/o mode", try);
+		msb->read_only = true;
+	}
+	return error;
+}
+
+/* Converts endianness in the boot block for easier use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+	p->header.block_id = be16_to_cpu(p->header.block_id);
+	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+	p->entry.disabled_block.start_addr
+		= be32_to_cpu(p->entry.disabled_block.start_addr);
+	p->entry.disabled_block.data_size
+		= be32_to_cpu(p->entry.disabled_block.data_size);
+	p->entry.cis_idi.start_addr
+		= be32_to_cpu(p->entry.cis_idi.start_addr);
+	p->entry.cis_idi.data_size
+		= be32_to_cpu(p->entry.cis_idi.data_size);
+	p->attr.block_size = be16_to_cpu(p->attr.block_size);
+	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+	p->attr.number_of_effective_blocks
+		= be16_to_cpu(p->attr.number_of_effective_blocks);
+	p->attr.page_size = be16_to_cpu(p->attr.page_size);
+	p->attr.memory_manufacturer_code
+		= be16_to_cpu(p->attr.memory_manufacturer_code);
+	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+	p->attr.implemented_capacity
+		= be16_to_cpu(p->attr.implemented_capacity);
+	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+	int pba = 0;
+	struct scatterlist sg;
+	struct ms_extra_data_register extra;
+	struct ms_boot_page *page;
+
+	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+	msb->boot_block_count = 0;
+
+	dbg_verbose("Start of a scan for the boot blocks");
+
+	if (!msb->boot_page) {
+		page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+
+		msb->boot_page = page;
+	} else
+		page = msb->boot_page;
+
+	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
+
+		sg_init_one(&sg, page, sizeof(*page));
+		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
+			dbg("boot scan: can't read pba %d", pba);
+			continue;
+		}
+
+		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+			dbg("management flag doesn't indicate boot block %d",
+									pba);
+			continue;
+		}
+
+		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+			dbg("the pba at %d doesn't contain boot block ID", pba);
+			continue;
+		}
+
+		msb_fix_boot_page_endianness(page);
+		msb->boot_block_locations[msb->boot_block_count] = pba;
+
+		page++;
+		msb->boot_block_count++;
+
+		if (msb->boot_block_count == 2)
+			break;
+	}
+
+	if (!msb->boot_block_count) {
+		pr_err("media doesn't contain master page, aborting");
+		return -EIO;
+	}
+
+	dbg_verbose("End of scan for boot blocks");
+	return 0;
+}
+
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+	struct ms_boot_page *boot_block;
+	struct scatterlist sg;
+	u16 *buffer = NULL;
+	int offset = 0;
+	int i, error = 0;
+	int data_size, data_offset, page, page_offset, size_to_read;
+	u16 pba;
+
+	BUG_ON(block_nr > 1);
+	boot_block = &msb->boot_page[block_nr];
+	pba = msb->boot_block_locations[block_nr];
+
+	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+		return -EINVAL;
+
+	data_size = boot_block->entry.disabled_block.data_size;
+	data_offset = sizeof(struct ms_boot_page) +
+			boot_block->entry.disabled_block.start_addr;
+	if (!data_size)
+		return 0;
+
+	page = data_offset / msb->page_size;
+	page_offset = data_offset % msb->page_size;
+	size_to_read =
+		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+			msb->page_size;
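+	/*
+	 * For illustration (hypothetical numbers): with 512-byte pages,
+	 * data_offset = 1536 and data_size = 16 give page 3, page_offset 0
+	 * and a single 512-byte page to read.
+	 */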
+
+	dbg("reading the bad block table of boot block at pba %d, offset %d len %d",
+		pba, data_offset, data_size);
+
+	buffer = kzalloc(size_to_read, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	/* Read the buffer */
+	sg_init_one(&sg, buffer, size_to_read);
+
+	while (offset < size_to_read) {
+		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
+		if (error)
+			goto out;
+
+		page++;
+		offset += msb->page_size;
+
+		if (page == msb->pages_in_block) {
+			pr_err(
+			"bad block table extends beyond the boot block");
+			break;
+		}
+	}
+
+	/* Process the bad block table */
+	for (i = page_offset; i < data_size / sizeof(u16); i++) {
+
+		u16 bad_block = be16_to_cpu(buffer[i]);
+
+		if (bad_block >= msb->block_count) {
+			dbg("bad block table contains invalid block %d",
+								bad_block);
+			continue;
+		}
+
+		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
+			dbg("duplicate bad block %d in the table",
+				bad_block);
+			continue;
+		}
+
+		dbg("block %d is marked as factory bad", bad_block);
+		msb_mark_block_used(msb, bad_block);
+	}
+out:
+	kfree(buffer);
+	return error;
+}
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+	int i;
+
+	if (msb->ftl_initialized)
+		return 0;
+
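+	/*
+	 * Each 512-block zone exposes 496 logical blocks (the remainder are
+	 * kept as spares for bad-block replacement); the -2 presumably
+	 * accounts for the two boot blocks.
+	 */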
+	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
+	msb->logical_block_count = msb->zone_count * 496 - 2;
+
+	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+	msb->lba_to_pba_table =
+		kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+
+	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+						!msb->erased_blocks_bitmap) {
+		kfree(msb->used_blocks_bitmap);
+		kfree(msb->lba_to_pba_table);
+		kfree(msb->erased_blocks_bitmap);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < msb->zone_count; i++)
+		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+			msb->logical_block_count * sizeof(u16));
+
+	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
+		msb->zone_count, msb->logical_block_count);
+
+	msb->ftl_initialized = true;
+	return 0;
+}
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+	u16 pba, lba, other_block;
+	u8 overwrite_flag, management_flag, other_overwrite_flag;
+	int error;
+	struct ms_extra_data_register extra;
+	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+
+	if (!overwrite_flags)
+		return -ENOMEM;
+
+	dbg("Start of media scanning");
+	for (pba = 0; pba < msb->block_count; pba++) {
+
+		if (pba == msb->boot_block_locations[0] ||
+			pba == msb->boot_block_locations[1]) {
+			dbg_verbose("pba %05d -> [boot block]", pba);
+			msb_mark_block_used(msb, pba);
+			continue;
+		}
+
+		if (test_bit(pba, msb->used_blocks_bitmap)) {
+			dbg_verbose("pba %05d -> [factory bad]", pba);
+			continue;
+		}
+
+		memset(&extra, 0, sizeof(extra));
+		error = msb_read_oob(msb, pba, 0, &extra);
+
+		/* can't trust the page if we can't read the oob */
+		if (error == -EBADMSG) {
+			pr_notice(
+			"oob of pba %d damaged, will try to erase it", pba);
+			msb_mark_block_used(msb, pba);
+			msb_erase_block(msb, pba);
+			continue;
+		} else if (error) {
+			pr_err("unknown error %d on read of oob of pba %d - aborting",
+				error, pba);
+
+			kfree(overwrite_flags);
+			return error;
+		}
+
+		lba = be16_to_cpu(extra.logical_address);
+		management_flag = extra.management_flag;
+		overwrite_flag = extra.overwrite_flag;
+		overwrite_flags[pba] = overwrite_flag;
+
+		/* Skip bad blocks */
+		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+			dbg("pba %05d -> [BAD]", pba);
+			msb_mark_block_used(msb, pba);
+			continue;
+		}
+
+		/* Skip system/DRM blocks */
+		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
+			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
+			dbg("pba %05d -> [reserved management flag %02x]",
+							pba, management_flag);
+			msb_mark_block_used(msb, pba);
+			continue;
+		}
+
+		/* Erase temporary tables */
+		if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+			dbg("pba %05d -> [temp table] - will erase", pba);
+
+			msb_mark_block_used(msb, pba);
+			msb_erase_block(msb, pba);
+			continue;
+		}
+
+		if (lba == MS_BLOCK_INVALID) {
+			dbg_verbose("pba %05d -> [free]", pba);
+			continue;
+		}
+
+		msb_mark_block_used(msb, pba);
+
+		/* The block's LBA doesn't match its zone */
+		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+			pr_notice("pba %05d -> [bad lba %05d] - will erase",
+								pba, lba);
+			msb_erase_block(msb, pba);
+			continue;
+		}
+
+		/* No collisions - great */
+		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+			msb->lba_to_pba_table[lba] = pba;
+			continue;
+		}
+
+		other_block = msb->lba_to_pba_table[lba];
+		other_overwrite_flag = overwrite_flags[other_block];
+
+		pr_notice("Collision between pba %d and pba %d",
+			pba, other_block);
+
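+		/*
+		 * Both pbas claim the same lba - most likely an interrupted
+		 * block update. Prefer the copy the driver considers stable
+		 * (UDST bit cleared); if neither copy qualifies, the block
+		 * found last (pba) wins and the earlier mapping is erased.
+		 */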
+		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+			pr_notice("pba %d is marked as stable, use it", pba);
+			msb_erase_block(msb, other_block);
+			msb->lba_to_pba_table[lba] = pba;
+			continue;
+		}
+
+		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+			pr_notice("pba %d is marked as stable, use it",
+								other_block);
+			msb_erase_block(msb, pba);
+			continue;
+		}
+
+		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
+				pba, other_block, other_block);
+
+		msb_erase_block(msb, other_block);
+		msb->lba_to_pba_table[lba] = pba;
+	}
+
+	dbg("End of media scanning");
+	kfree(overwrite_flags);
+	return 0;
+}
+
+static void msb_cache_flush_timer(unsigned long data)
+{
+	struct msb_data *msb = (struct msb_data *)data;
+	msb->need_flush_cache = true;
+	queue_work(msb->io_queue, &msb->io_work);
+}
+
+
+static void msb_cache_discard(struct msb_data *msb)
+{
+	if (msb->cache_block_lba == MS_BLOCK_INVALID)
+		return;
+
+	del_timer_sync(&msb->cache_flush_timer);
+
+	dbg_verbose("Discarding the write cache");
+	msb->cache_block_lba = MS_BLOCK_INVALID;
+	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+}
+
+static int msb_cache_init(struct msb_data *msb)
+{
+	setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
+		(unsigned long)msb);
+
+	if (!msb->cache)
+		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+	if (!msb->cache)
+		return -ENOMEM;
+
+	msb_cache_discard(msb);
+	return 0;
+}
+
+static int msb_cache_flush(struct msb_data *msb)
+{
+	struct scatterlist sg;
+	struct ms_extra_data_register extra;
+	int page, offset, error;
+	u16 pba, lba;
+
+	if (msb->read_only)
+		return -EROFS;
+
+	if (msb->cache_block_lba == MS_BLOCK_INVALID)
+		return 0;
+
+	lba = msb->cache_block_lba;
+	pba = msb->lba_to_pba_table[lba];
+
+	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
+						pba, msb->cache_block_lba);
+
+	sg_init_one(&sg, msb->cache , msb->block_size);
+
+	/* Read all missing pages in cache */
+	for (page = 0; page < msb->pages_in_block; page++) {
+
+		if (test_bit(page, &msb->valid_cache_bitmap))
+			continue;
+
+		offset = page * msb->page_size;
+
+		dbg_verbose("reading non-present sector %d of cache block %d",
+			page, lba);
+		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
+
+		/* Bad pages are copied with 00 page status */
+		if (error == -EBADMSG) {
+			pr_err("read error on sector %d, contents probably damaged", page);
+			continue;
+		}
+
+		if (error)
+			return error;
+
+		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+							MEMSTICK_OV_PG_NORMAL) {
+			dbg("page %d is marked as bad", page);
+			continue;
+		}
+
+		set_bit(page, &msb->valid_cache_bitmap);
+	}
+
+	/* Write the cache now */
+	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
+	pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+	/* Mark invalid pages */
+	if (!error) {
+		for (page = 0; page < msb->pages_in_block; page++) {
+
+			if (test_bit(page, &msb->valid_cache_bitmap))
+				continue;
+
+			dbg("marking page %d as containing damaged data",
+				page);
+			msb_set_overwrite_flag(msb,
+				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+		}
+	}
+
+	msb_cache_discard(msb);
+	return error;
+}
+
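+/*
+ * The write-back cache covers exactly one logical block at a time: pages
+ * written to cache_block_lba accumulate in msb->cache and are flushed as
+ * a single block update, either when a different lba gets written or when
+ * the cache flush timer fires.
+ */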
+static int msb_cache_write(struct msb_data *msb, int lba,
+	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
+{
+	int error;
+	struct scatterlist sg_tmp[10];
+
+	if (msb->read_only)
+		return -EROFS;
+
+	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
+						lba != msb->cache_block_lba)
+		if (add_to_cache_only)
+			return 0;
+
+	/* If we need to write different block */
+	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
+						lba != msb->cache_block_lba) {
+		dbg_verbose("first flush the cache");
+		error = msb_cache_flush(msb);
+		if (error)
+			return error;
+	}
+
+	if (msb->cache_block_lba  == MS_BLOCK_INVALID) {
+		msb->cache_block_lba  = lba;
+		mod_timer(&msb->cache_flush_timer,
+			jiffies + msecs_to_jiffies(cache_flush_timeout));
+	}
+
+	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
+
+	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+
+	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
+		msb->cache + page * msb->page_size, msb->page_size);
+
+	set_bit(page, &msb->valid_cache_bitmap);
+	return 0;
+}
+
+static int msb_cache_read(struct msb_data *msb, int lba,
+				int page, struct scatterlist *sg, int offset)
+{
+	int pba = msb->lba_to_pba_table[lba];
+	struct scatterlist sg_tmp[10];
+	int error = 0;
+
+	if (lba == msb->cache_block_lba &&
+			test_bit(page, &msb->valid_cache_bitmap)) {
+
+		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+							lba, pba, page);
+
+		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
+			offset, msb->page_size);
+		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
+			msb->cache + msb->page_size * page,
+							msb->page_size);
+	} else {
+		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+							lba, pba, page);
+
+		error = msb_read_page(msb, pba, page, NULL, sg, offset);
+		if (error)
+			return error;
+
+		msb_cache_write(msb, lba, page, true, sg, offset);
+	}
+	return error;
+}
+
+/* Emulated geometry table
+ * This table's content isn't that important;
+ * one could put different values here, provided that they still
+ * cover the whole disk.
+ * The 64 MB entry is what Windows reports for my 64M memstick */
+
+static const struct chs_entry chs_table[] = {
+/*        size sectors cylinders  heads */
+	{ 4,    16,    247,       2  },
+	{ 8,    16,    495,       2  },
+	{ 16,   16,    495,       4  },
+	{ 32,   16,    991,       4  },
+	{ 64,   16,    991,       8  },
+	{128,   16,    991,       16 },
+	{ 0 }
+};
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_host *host = card->host;
+	struct ms_boot_page *boot_block;
+	int error = 0, i, raw_size_in_megs;
+
+	msb->caps = 0;
+
+	if (card->id.class >= MEMSTICK_CLASS_ROM &&
+				card->id.class <= MEMSTICK_CLASS_WP)
+		msb->read_only = true;
+
+	msb->state = -1;
+	error = msb_reset(msb, false);
+	if (error)
+		return error;
+
+	/* Due to a bug in the JMicron driver written by Alex Dubov,
+	 its serial mode barely works,
+	 so we switch to parallel mode right away */
+	if (host->caps & MEMSTICK_CAP_PAR4)
+		msb_switch_to_parallel(msb);
+
+	msb->page_size = sizeof(struct ms_boot_page);
+
+	/* Read the boot page */
+	error = msb_read_boot_blocks(msb);
+	if (error)
+		return -EIO;
+
+	boot_block = &msb->boot_page[0];
+
+	/* Save interesting attributes from the boot page */
+	msb->block_count = boot_block->attr.number_of_blocks;
+	msb->page_size = boot_block->attr.page_size;
+
+	msb->pages_in_block = boot_block->attr.block_size * 2;
+	msb->block_size = msb->page_size * msb->pages_in_block;
+
+	if (msb->page_size > PAGE_SIZE) {
+		/* this isn't supported by Linux at all, anyway */
+		dbg("device page %d size isn't supported", msb->page_size);
+		return -EINVAL;
+	}
+
+	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+	if (!msb->block_buffer)
+		return -ENOMEM;
+
+	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+	for (i = 0; chs_table[i].size; i++) {
+
+		if (chs_table[i].size != raw_size_in_megs)
+			continue;
+
+		msb->geometry.cylinders = chs_table[i].cyl;
+		msb->geometry.heads = chs_table[i].head;
+		msb->geometry.sectors = chs_table[i].sec;
+		break;
+	}
+
+	if (boot_block->attr.transfer_supporting == 1)
+		msb->caps |= MEMSTICK_CAP_PAR4;
+
+	if (boot_block->attr.device_type & 0x03)
+		msb->read_only = true;
+
+	dbg("Total block count = %d", msb->block_count);
+	dbg("Each block consists of %d pages", msb->pages_in_block);
+	dbg("Page size = %d bytes", msb->page_size);
+	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
+	dbg("Read only: %d", msb->read_only);
+
+#if 0
+	/* Now we can switch the interface */
+	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
+		msb_switch_to_parallel(msb);
+#endif
+
+	error = msb_cache_init(msb);
+	if (error)
+		return error;
+
+	error = msb_ftl_initialize(msb);
+	if (error)
+		return error;
+
+
+	/* Read the bad block table */
+	error = msb_read_bad_block_table(msb, 0);
+
+	if (error && error != -ENOMEM) {
+		dbg("failed to read bad block table from primary boot block, trying from backup");
+		error = msb_read_bad_block_table(msb, 1);
+	}
+
+	if (error)
+		return error;
+
+	/* *drum roll* Scan the media */
+	error = msb_ftl_scan(msb);
+	if (error) {
+		pr_err("Scan of media failed");
+		return error;
+	}
+
+	return 0;
+
+}
+
+static int msb_do_write_request(struct msb_data *msb, int lba,
+	int page, struct scatterlist *sg, size_t len, int *successfully_written)
+{
+	int error = 0;
+	off_t offset = 0;
+	*successfully_written = 0;
+
+	while (offset < len) {
+		if (page == 0 && len - offset >= msb->block_size) {
+
+			if (msb->cache_block_lba == lba)
+				msb_cache_discard(msb);
+
+			dbg_verbose("Writing whole lba %d", lba);
+			error = msb_update_block(msb, lba, sg, offset);
+			if (error)
+				return error;
+
+			offset += msb->block_size;
+			*successfully_written += msb->block_size;
+			lba++;
+			continue;
+		}
+
+		error = msb_cache_write(msb, lba, page, false, sg, offset);
+		if (error)
+			return error;
+
+		offset += msb->page_size;
+		*successfully_written += msb->page_size;
+
+		page++;
+		if (page == msb->pages_in_block) {
+			page = 0;
+			lba++;
+		}
+	}
+	return 0;
+}
+
+static int msb_do_read_request(struct msb_data *msb, int lba,
+		int page, struct scatterlist *sg, int len, int *successfully_read)
+{
+	int error = 0;
+	int offset = 0;
+	*successfully_read = 0;
+
+	while (offset < len) {
+
+		error = msb_cache_read(msb, lba, page, sg, offset);
+		if (error)
+			return error;
+
+		offset += msb->page_size;
+		*successfully_read += msb->page_size;
+
+		page++;
+		if (page == msb->pages_in_block) {
+			page = 0;
+			lba++;
+		}
+	}
+	return 0;
+}
+
+static void msb_io_work(struct work_struct *work)
+{
+	struct msb_data *msb = container_of(work, struct msb_data, io_work);
+	int page, error, len;
+	sector_t lba;
+	unsigned long flags;
+	struct scatterlist *sg = msb->prealloc_sg;
+
+	dbg_verbose("IO: work started");
+
+	while (1) {
+		spin_lock_irqsave(&msb->q_lock, flags);
+
+		if (msb->need_flush_cache) {
+			msb->need_flush_cache = false;
+			spin_unlock_irqrestore(&msb->q_lock, flags);
+			msb_cache_flush(msb);
+			continue;
+		}
+
+		if (!msb->req) {
+			msb->req = blk_fetch_request(msb->queue);
+			if (!msb->req) {
+				dbg_verbose("IO: no more requests exiting");
+				spin_unlock_irqrestore(&msb->q_lock, flags);
+				return;
+			}
+		}
+
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+
+		/* If card was removed meanwhile */
+		if (!msb->req)
+			return;
+
+		/* process the request */
+		dbg_verbose("IO: processing new request");
+		blk_rq_map_sg(msb->queue, msb->req, sg);
+
+		lba = blk_rq_pos(msb->req);
+
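+		/*
+		 * Convert the 512-byte sector position into (lba, page).
+		 * For illustration (hypothetical geometry): with 2048-byte
+		 * pages and 16 pages per block, sector 100 -> page index 25
+		 * -> lba 1, page 9.
+		 */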
+		sector_div(lba, msb->page_size / 512);
+		page = do_div(lba, msb->pages_in_block);
+
+		if (rq_data_dir(msb->req) == READ)
+			error = msb_do_read_request(msb, lba, page, sg,
+				blk_rq_bytes(msb->req), &len);
+		else
+			error = msb_do_write_request(msb, lba, page, sg,
+				blk_rq_bytes(msb->req), &len);
+
+		spin_lock_irqsave(&msb->q_lock, flags);
+
+		if (len)
+			if (!__blk_end_request(msb->req, 0, len))
+				msb->req = NULL;
+
+		if (error && msb->req) {
+			dbg_verbose("IO: ending one sector of the request with error");
+			if (!__blk_end_request(msb->req, error, msb->page_size))
+				msb->req = NULL;
+		}
+
+		if (msb->req)
+			dbg_verbose("IO: request still pending");
+
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+	}
+}
+
+static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
+static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
+
+static int msb_bd_open(struct block_device *bdev, fmode_t mode)
+{
+	struct gendisk *disk = bdev->bd_disk;
+	struct msb_data *msb = disk->private_data;
+
+	dbg_verbose("block device open");
+
+	mutex_lock(&msb_disk_lock);
+
+	if (msb && msb->card)
+		msb->usage_count++;
+
+	mutex_unlock(&msb_disk_lock);
+	return 0;
+}
+
+static void msb_data_clear(struct msb_data *msb)
+{
+	kfree(msb->boot_page);
+	kfree(msb->used_blocks_bitmap);
+	kfree(msb->lba_to_pba_table);
+	kfree(msb->cache);
+	msb->card = NULL;
+}
+
+static int msb_disk_release(struct gendisk *disk)
+{
+	struct msb_data *msb = disk->private_data;
+
+	dbg_verbose("block device release");
+	mutex_lock(&msb_disk_lock);
+
+	if (msb) {
+		if (msb->usage_count)
+			msb->usage_count--;
+
+		if (!msb->usage_count) {
+			disk->private_data = NULL;
+			idr_remove(&msb_disk_idr, msb->disk_id);
+			put_disk(disk);
+			kfree(msb);
+		}
+	}
+	mutex_unlock(&msb_disk_lock);
+	return 0;
+}
+
+static void msb_bd_release(struct gendisk *disk, fmode_t mode)
+{
+	msb_disk_release(disk);
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+				 struct hd_geometry *geo)
+{
+	struct msb_data *msb = bdev->bd_disk->private_data;
+	*geo = msb->geometry;
+	return 0;
+}
+
+static int msb_prepare_req(struct request_queue *q, struct request *req)
+{
+	if (req->cmd_type != REQ_TYPE_FS &&
+				req->cmd_type != REQ_TYPE_BLOCK_PC) {
+		blk_dump_rq_flags(req, "MS unsupported request");
+		return BLKPREP_KILL;
+	}
+	req->cmd_flags |= REQ_DONTPREP;
+	return BLKPREP_OK;
+}
+
+static void msb_submit_req(struct request_queue *q)
+{
+	struct memstick_dev *card = q->queuedata;
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct request *req = NULL;
+
+	dbg_verbose("Submit request");
+
+	if (msb->card_dead) {
+		dbg("Refusing requests on removed card");
+
+		WARN_ON(!msb->io_queue_stopped);
+
+		while ((req = blk_fetch_request(q)) != NULL)
+			__blk_end_request_all(req, -ENODEV);
+		return;
+	}
+
+	if (msb->req)
+		return;
+
+	if (!msb->io_queue_stopped)
+		queue_work(msb->io_queue, &msb->io_work);
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	return (msb->card_dead == 0);
+}
+
+static void msb_stop(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	unsigned long flags;
+
+	dbg("Stopping all msblock IO");
+
+	spin_lock_irqsave(&msb->q_lock, flags);
+	blk_stop_queue(msb->queue);
+	msb->io_queue_stopped = true;
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	del_timer_sync(&msb->cache_flush_timer);
+	flush_workqueue(msb->io_queue);
+
+	if (msb->req) {
+		spin_lock_irqsave(&msb->q_lock, flags);
+		blk_requeue_request(msb->queue, msb->req);
+		msb->req = NULL;
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+	}
+
+}
+
+static void msb_start(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	unsigned long flags;
+
+	dbg("Resuming IO from msblock");
+
+	msb_invalidate_reg_window(msb);
+
+	spin_lock_irqsave(&msb->q_lock, flags);
+	if (!msb->io_queue_stopped || msb->card_dead) {
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	/* Kick a cache flush anyway, it's harmless */
+	msb->need_flush_cache = true;
+	msb->io_queue_stopped = false;
+
+	spin_lock_irqsave(&msb->q_lock, flags);
+	blk_start_queue(msb->queue);
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	queue_work(msb->io_queue, &msb->io_work);
+
+}
+
+static const struct block_device_operations msb_bdops = {
+	.open    = msb_bd_open,
+	.release = msb_bd_release,
+	.getgeo  = msb_bd_getgeo,
+	.owner   = THIS_MODULE
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_host *host = card->host;
+	int rc;
+	u64 limit = BLK_BOUNCE_HIGH;
+	unsigned long capacity;
+
+	if (host->dev.dma_mask && *(host->dev.dma_mask))
+		limit = *(host->dev.dma_mask);
+
+	mutex_lock(&msb_disk_lock);
+	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
+	mutex_unlock(&msb_disk_lock);
+
+	if (msb->disk_id  < 0)
+		return msb->disk_id;
+
+	msb->disk = alloc_disk(0);
+	if (!msb->disk) {
+		rc = -ENOMEM;
+		goto out_release_id;
+	}
+
+	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
+	if (!msb->queue) {
+		rc = -ENOMEM;
+		goto out_put_disk;
+	}
+
+	msb->queue->queuedata = card;
+	blk_queue_prep_rq(msb->queue, msb_prepare_req);
+
+	blk_queue_bounce_limit(msb->queue, limit);
+	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+	blk_queue_max_segment_size(msb->queue,
+				   MS_BLOCK_MAX_PAGES * msb->page_size);
+	blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
+	msb->disk->fops = &msb_bdops;
+	msb->disk->private_data = msb;
+	msb->disk->queue = msb->queue;
+	msb->disk->driverfs_dev = &card->dev;
+	msb->disk->flags |= GENHD_FL_EXT_DEVT;
+
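+	/*
+	 * Capacity is in 512-byte sectors. For illustration (hypothetical
+	 * geometry): a 64M stick with 512-byte pages, 32 pages per block and
+	 * 3966 logical blocks gives 32 * 3966 * 1 = 126912 sectors.
+	 */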
+	capacity = msb->pages_in_block * msb->logical_block_count;
+	capacity *= (msb->page_size / 512);
+	set_capacity(msb->disk, capacity);
+	dbg("Set total disk size to %lu sectors", capacity);
+
+	msb->usage_count = 1;
+	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
+	INIT_WORK(&msb->io_work, msb_io_work);
+	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+	if (msb->read_only)
+		set_disk_ro(msb->disk, 1);
+
+	msb_start(card);
+	add_disk(msb->disk);
+	dbg("Disk added");
+	return 0;
+
+out_put_disk:
+	put_disk(msb->disk);
+out_release_id:
+	mutex_lock(&msb_disk_lock);
+	idr_remove(&msb_disk_idr, msb->disk_id);
+	mutex_unlock(&msb_disk_lock);
+	return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+	struct msb_data *msb;
+	int rc = 0;
+
+	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+	if (!msb)
+		return -ENOMEM;
+	memstick_set_drvdata(card, msb);
+	msb->card = card;
+	spin_lock_init(&msb->q_lock);
+
+	rc = msb_init_card(card);
+	if (rc)
+		goto out_free;
+
+	rc = msb_init_disk(card);
+	if (!rc) {
+		card->check = msb_check_card;
+		card->stop = msb_stop;
+		card->start = msb_start;
+		return 0;
+	}
+out_free:
+	memstick_set_drvdata(card, NULL);
+	msb_data_clear(msb);
+	kfree(msb);
+	return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	unsigned long flags;
+
+	if (!msb->io_queue_stopped)
+		msb_stop(card);
+
+	dbg("Removing the disk device");
+
+	/* Take care of unhandled + new requests from now on */
+	spin_lock_irqsave(&msb->q_lock, flags);
+	msb->card_dead = true;
+	blk_start_queue(msb->queue);
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	/* Remove the disk */
+	del_gendisk(msb->disk);
+	blk_cleanup_queue(msb->queue);
+	msb->queue = NULL;
+
+	mutex_lock(&msb_disk_lock);
+	msb_data_clear(msb);
+	mutex_unlock(&msb_disk_lock);
+
+	msb_disk_release(msb->disk);
+	memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+	msb_stop(card);
+	return 0;
+}
+
+static int msb_resume(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct msb_data *new_msb = NULL;
+	bool card_dead = true;
+
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+	msb->card_dead = true;
+	return 0;
+#endif
+	mutex_lock(&card->host->lock);
+
+	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+	if (!new_msb)
+		goto out;
+
+	new_msb->card = card;
+	memstick_set_drvdata(card, new_msb);
+	spin_lock_init(&new_msb->q_lock);
+	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+	if (msb_init_card(card))
+		goto out;
+
+	if (msb->block_size != new_msb->block_size)
+		goto out;
+
+	if (memcmp(msb->boot_page, new_msb->boot_page,
+					sizeof(struct ms_boot_page)))
+		goto out;
+
+	if (msb->logical_block_count != new_msb->logical_block_count ||
+		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+						msb->logical_block_count))
+		goto out;
+
+	if (msb->block_count != new_msb->block_count ||
+		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+							msb->block_count / 8))
+		goto out;
+
+	card_dead = false;
+out:
+	if (card_dead)
+		dbg("Card was removed/replaced during suspend");
+
+	msb->card_dead = card_dead;
+	memstick_set_drvdata(card, msb);
+
+	if (new_msb) {
+		msb_data_clear(new_msb);
+		kfree(new_msb);
+	}
+
+	msb_start(card);
+	mutex_unlock(&card->host->lock);
+	return 0;
+}
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_FLASH},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_ROM},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_RO},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_WP},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+	 MEMSTICK_CLASS_DUO},
+	{}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+
+static struct memstick_driver msb_driver = {
+	.driver = {
+		.name  = DRIVER_NAME,
+		.owner = THIS_MODULE
+	},
+	.id_table = msb_id_tbl,
+	.probe    = msb_probe,
+	.remove   = msb_remove,
+	.suspend  = msb_suspend,
+	.resume   = msb_resume
+};
+
+static int major;
+
+static int __init msb_init(void)
+{
+	int rc = register_blkdev(0, DRIVER_NAME);
+
+	if (rc < 0) {
+		pr_err("failed to register major (error %d)\n", rc);
+		return rc;
+	}
+
+	major = rc;
+	rc = memstick_register_driver(&msb_driver);
+	if (rc) {
+		unregister_blkdev(major, DRIVER_NAME);
+		pr_err("failed to register memstick driver (error %d)\n", rc);
+	}
+
+	return rc;
+}
+
+static void __exit msb_exit(void)
+{
+	memstick_unregister_driver(&msb_driver);
+	unregister_blkdev(major, DRIVER_NAME);
+	idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+				"Cache flush timeout in msec (1000 default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
new file mode 100644
index 0000000..96e6375
--- /dev/null
+++ b/drivers/memstick/core/ms_block.h
@@ -0,0 +1,290 @@
+/*
+ *  ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver are copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * The MS structures were also copied from an old, broken driver by the
+ * same author; these probably come from the MS spec.
+ *
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS      32
+#define MS_BLOCK_MAX_PAGES     ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID       0x0001
+#define MS_BLOCK_INVALID       0xffff
+#define MS_MAX_ZONES           16
+#define MS_BLOCKS_IN_ZONE      512
+
+#define MS_BLOCK_MAP_LINE_SZ   16
+#define MS_BLOCK_PART_SHIFT    3
+
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+		MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+	MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+	(MEMSTICK_OVERWRITE_PGST1 | \
+	MEMSTICK_OVERWRITE_PGST0  | \
+	MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+	(MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGEMENT_FLAG_NORMAL \
+	(MEMSTICK_MANAGEMENT_SYSFLG |  \
+	MEMSTICK_MANAGEMENT_SCMS1   |  \
+	MEMSTICK_MANAGEMENT_SCMS0)
+
+struct ms_boot_header {
+	unsigned short block_id;
+	unsigned short format_reserved;
+	unsigned char  reserved0[184];
+	unsigned char  data_entry;
+	unsigned char  reserved1[179];
+} __packed;
+
+
+struct ms_system_item {
+	unsigned int  start_addr;
+	unsigned int  data_size;
+	unsigned char data_type_id;
+	unsigned char reserved[3];
+} __packed;
+
+struct ms_system_entry {
+	struct ms_system_item disabled_block;
+	struct ms_system_item cis_idi;
+	unsigned char         reserved[24];
+} __packed;
+
+struct ms_boot_attr_info {
+	unsigned char      memorystick_class;
+	unsigned char      format_unique_value1;
+	unsigned short     block_size;
+	unsigned short     number_of_blocks;
+	unsigned short     number_of_effective_blocks;
+	unsigned short     page_size;
+	unsigned char      extra_data_size;
+	unsigned char      format_unique_value2;
+	unsigned char      assembly_time[8];
+	unsigned char      format_unique_value3;
+	unsigned char      serial_number[3];
+	unsigned char      assembly_manufacturer_code;
+	unsigned char      assembly_model_code[3];
+	unsigned short     memory_manufacturer_code;
+	unsigned short     memory_device_code;
+	unsigned short     implemented_capacity;
+	unsigned char      format_unique_value4[2];
+	unsigned char      vcc;
+	unsigned char      vpp;
+	unsigned short     controller_number;
+	unsigned short     controller_function;
+	unsigned char      reserved0[9];
+	unsigned char      transfer_supporting;
+	unsigned short     format_unique_value5;
+	unsigned char      format_type;
+	unsigned char      memorystick_application;
+	unsigned char      device_type;
+	unsigned char      reserved1[22];
+	unsigned char      format_unique_value6[2];
+	unsigned char      reserved2[15];
+} __packed;
+
+struct ms_cis_idi {
+	unsigned short general_config;
+	unsigned short logical_cylinders;
+	unsigned short reserved0;
+	unsigned short logical_heads;
+	unsigned short track_size;
+	unsigned short page_size;
+	unsigned short pages_per_track;
+	unsigned short msw;
+	unsigned short lsw;
+	unsigned short reserved1;
+	unsigned char  serial_number[20];
+	unsigned short buffer_type;
+	unsigned short buffer_size_increments;
+	unsigned short long_command_ecc;
+	unsigned char  firmware_version[28];
+	unsigned char  model_name[18];
+	unsigned short reserved2[5];
+	unsigned short pio_mode_number;
+	unsigned short dma_mode_number;
+	unsigned short field_validity;
+	unsigned short current_logical_cylinders;
+	unsigned short current_logical_heads;
+	unsigned short current_pages_per_track;
+	unsigned int   current_page_capacity;
+	unsigned short     multiple_page_setting;
+	unsigned int   addressable_pages;
+	unsigned short single_word_dma;
+	unsigned short multi_word_dma;
+	unsigned char  reserved3[128];
+} __packed;
+
+
+struct ms_boot_page {
+	struct ms_boot_header    header;
+	struct ms_system_entry   entry;
+	struct ms_boot_attr_info attr;
+} __packed;
+
+struct msb_data {
+	unsigned int			usage_count;
+	struct memstick_dev		*card;
+	struct gendisk			*disk;
+	struct request_queue		*queue;
+	spinlock_t			q_lock;
+	struct hd_geometry		geometry;
+	struct attribute_group		attr_group;
+	struct request			*req;
+	int				caps;
+	int				disk_id;
+
+	/* IO */
+	struct workqueue_struct		*io_queue;
+	bool				io_queue_stopped;
+	struct work_struct		io_work;
+	bool				card_dead;
+
+	/* Media properties */
+	struct ms_boot_page		*boot_page;
+	u16				boot_block_locations[2];
+	int				boot_block_count;
+
+	bool				read_only;
+	unsigned short			page_size;
+	int				block_size;
+	int				pages_in_block;
+	int				zone_count;
+	int				block_count;
+	int				logical_block_count;
+
+	/* FTL tables */
+	unsigned long			*used_blocks_bitmap;
+	unsigned long			*erased_blocks_bitmap;
+	u16				*lba_to_pba_table;
+	int				free_block_count[MS_MAX_ZONES];
+	bool				ftl_initialized;
+
+	/* Cache */
+	unsigned char			*cache;
+	unsigned long			valid_cache_bitmap;
+	int				cache_block_lba;
+	bool				need_flush_cache;
+	struct timer_list		cache_flush_timer;
+
+	/* Preallocated buffers */
+	unsigned char			*block_buffer;
+	struct scatterlist		prealloc_sg[MS_BLOCK_MAX_SEGS+1];
+
+
+	/* handler's local data */
+	struct ms_register_addr		reg_addr;
+	bool				addr_valid;
+
+	u8				command_value;
+	bool				command_need_oob;
+	struct scatterlist		*current_sg;
+	int				current_sg_offset;
+
+	struct ms_register		regs;
+	int				current_page;
+
+	int				state;
+	int				exit_error;
+	bool				int_polling;
+	unsigned long			int_timeout;
+
+};
+
+enum msb_readpage_states {
+	MSB_RP_SEND_BLOCK_ADDRESS = 0,
+	MSB_RP_SEND_READ_COMMAND,
+
+	MSB_RP_SEND_INT_REQ,
+	MSB_RP_RECEIVE_INT_REQ_RESULT,
+
+	MSB_RP_SEND_READ_STATUS_REG,
+	MSB_RP_RECEIVE_STATUS_REG,
+
+	MSB_RP_SEND_OOB_READ,
+	MSB_RP_RECEIVE_OOB_READ,
+
+	MSB_RP_SEND_READ_DATA,
+	MSB_RP_RECEIVE_READ_DATA,
+};
+
+enum msb_write_block_states {
+	MSB_WB_SEND_WRITE_PARAMS = 0,
+	MSB_WB_SEND_WRITE_OOB,
+	MSB_WB_SEND_WRITE_COMMAND,
+
+	MSB_WB_SEND_INT_REQ,
+	MSB_WB_RECEIVE_INT_REQ,
+
+	MSB_WB_SEND_WRITE_DATA,
+	MSB_WB_RECEIVE_WRITE_CONFIRMATION,
+};
+
+enum msb_send_command_states {
+	MSB_SC_SEND_WRITE_PARAMS,
+	MSB_SC_SEND_WRITE_OOB,
+	MSB_SC_SEND_COMMAND,
+
+	MSB_SC_SEND_INT_REQ,
+	MSB_SC_RECEIVE_INT_REQ,
+
+};
+
+enum msb_reset_states {
+	MSB_RS_SEND,
+	MSB_RS_CONFIRM,
+};
+
+enum msb_par_switch_states {
+	MSB_PS_SEND_SWITCH_COMMAND,
+	MSB_PS_SWITCH_HOST,
+	MSB_PS_CONFIRM,
+};
+
+struct chs_entry {
+	unsigned long size;
+	unsigned char sec;
+	unsigned short cyl;
+	unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb, bool full);
+
+static int h_msb_default_bad(struct memstick_dev *card,
+						struct memstick_request **mrq);
+
+#define __dbg(level, format, ...) \
+	do { \
+		if (debug >= level) \
+			pr_err(format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+
+#define dbg(format, ...)		__dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...)	__dbg(2, format, ## __VA_ARGS__)
+
+#endif
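
The __dbg() helper above routes everything through pr_err() but gates it on a
module-wide "debug" level, so verbosity can be raised at module load time (or
via sysfs) without rebuilding. A minimal usage sketch, assuming the driver
declares the usual module parameter; the example function and messages are
illustrative only:

    #include <linux/module.h>
    #include <linux/printk.h>

    static int debug;	/* 0 = quiet, 1 = dbg(), 2 = dbg_verbose() */
    module_param(debug, int, 0644);

    static void msb_example(void)
    {
            dbg("erasing pba %d", 42);               /* printed when debug >= 1 */
            dbg_verbose("status byte 0x%02x", 0x80); /* printed when debug >= 2 */
    }
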
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index cf8bd72..25f8f93 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -612,8 +612,6 @@
 	memstick_remove_host(msh);
 	memstick_free_host(msh);
 
-	platform_set_drvdata(pdev, NULL);
-
 	dev_dbg(&(pdev->dev),
 		": Realtek PCI-E Memstick controller has been removed\n");
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cd0b7f4..1a3163f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -812,7 +812,7 @@
  * Otherwise we don't understand what happened, so abort.
  */
 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-	struct mmc_blk_request *brq, int *ecc_err)
+	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
 {
 	bool prev_cmd_status_valid = true;
 	u32 status, stop_status = 0;
@@ -850,6 +850,16 @@
 	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
 		*ecc_err = 1;
 
+	/* Flag general errors */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+		if ((status & R1_ERROR) ||
+			(brq->stop.resp[0] & R1_ERROR)) {
+			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, __func__,
+			       brq->stop.resp[0], status);
+			*gen_err = 1;
+		}
+
 	/*
 	 * Check the current card state.  If it is in some data transfer
 	 * mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -869,6 +879,13 @@
 			return ERR_ABORT;
 		if (stop_status & R1_CARD_ECC_FAILED)
 			*ecc_err = 1;
+		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+			if (stop_status & R1_ERROR) {
+				pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+				       req->rq_disk->disk_name, __func__,
+				       stop_status);
+				*gen_err = 1;
+			}
 	}
 
 	/* Check for set block count errors */
@@ -1097,7 +1114,7 @@
 						    mmc_active);
 	struct mmc_blk_request *brq = &mq_mrq->brq;
 	struct request *req = mq_mrq->req;
-	int ecc_err = 0;
+	int ecc_err = 0, gen_err = 0;
 
 	/*
 	 * sbc.error indicates a problem with the set block count
@@ -1111,7 +1128,7 @@
 	 */
 	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
 	    brq->data.error) {
-		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
 		case ERR_RETRY:
 			return MMC_BLK_RETRY;
 		case ERR_ABORT:
@@ -1143,6 +1160,14 @@
 		u32 status;
 		unsigned long timeout;
 
+		/* Check stop command response */
+		if (brq->stop.resp[0] & R1_ERROR) {
+			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+			       req->rq_disk->disk_name, __func__,
+			       brq->stop.resp[0]);
+			gen_err = 1;
+		}
+
 		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
 		do {
 			int err = get_card_status(card, &status, 5);
@@ -1152,6 +1177,13 @@
 				return MMC_BLK_CMD_ERR;
 			}
 
+			if (status & R1_ERROR) {
+				pr_err("%s: %s: general error sending status command, card status %#x\n",
+				       req->rq_disk->disk_name, __func__,
+				       status);
+				gen_err = 1;
+			}
+
 			/* Timeout if the device never becomes ready for data
 			 * and never leaves the program state.
 			 */
@@ -1171,6 +1203,13 @@
 			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
 	}
 
+	/* If a general error occurred, retry the write operation. */
+	if (gen_err) {
+		pr_warn("%s: retrying write for general error\n",
+				req->rq_disk->disk_name);
+		return MMC_BLK_RETRY;
+	}
+
 	if (brq->data.error) {
 		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
 		       req->rq_disk->disk_name, brq->data.error,
@@ -2191,10 +2230,10 @@
 		 * is freeing the queue that stops new requests
 		 * from being accepted.
 		 */
+		card = md->queue.card;
 		mmc_cleanup_queue(&md->queue);
 		if (md->flags & MMC_BLK_PACKED_CMD)
 			mmc_packed_clean(&md->queue);
-		card = md->queue.card;
 		if (md->disk->flags & GENHD_FL_UP) {
 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
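
The gen_err plumbing above follows one pattern throughout: for non-SPI write
requests, an R1_ERROR bit in either the stop-command response or a subsequent
CMD13 status poll marks the request as having hit a general/unknown card
error, and the whole write is retried instead of being reported as complete.
A reduced sketch of the check itself (helper name hypothetical; R1_ERROR comes
from <linux/mmc/mmc.h>):

    #include <linux/mmc/mmc.h>
    #include <linux/types.h>

    /* True if the card flagged a general error in the stop response or in
     * a later status poll; the caller then returns MMC_BLK_RETRY. */
    static bool mmc_example_general_error(u32 stop_resp, u32 status)
    {
            return (stop_resp & R1_ERROR) || (status & R1_ERROR);
    }
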
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index a69df52..0c0fc52 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2849,18 +2849,12 @@
 	struct seq_file *sf = (struct seq_file *)file->private_data;
 	struct mmc_card *card = (struct mmc_card *)sf->private;
 	struct mmc_test_card *test;
-	char lbuf[12];
 	long testcase;
+	int ret;
 
-	if (count >= sizeof(lbuf))
-		return -EINVAL;
-
-	if (copy_from_user(lbuf, buf, count))
-		return -EFAULT;
-	lbuf[count] = '\0';
-
-	if (strict_strtol(lbuf, 10, &testcase))
-		return -EINVAL;
+	ret = kstrtol_from_user(buf, count, 10, &testcase);
+	if (ret)
+		return ret;
 
 	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
 	if (!test)
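
The mmc_test change replaces an open-coded copy_from_user() + strict_strtol()
pair with kstrtol_from_user(), which bounds-checks, copies, and parses in one
call and returns -EFAULT/-EINVAL/-ERANGE on its own. A sketch of the resulting
write-handler shape (function name hypothetical):

    #include <linux/fs.h>
    #include <linux/kernel.h>

    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
    {
            long val;
            int ret = kstrtol_from_user(buf, count, 10, &val);

            if (ret)
                    return ret;	/* bad copy, or not a base-10 number */
            /* ... act on val ... */
            return count;
    }
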
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5d08855..bf18b6b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,7 @@
 #include <linux/fault-inject.h>
 #include <linux/random.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -1196,6 +1197,49 @@
 }
 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
 
+#ifdef CONFIG_OF
+
+/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: the device node to be parsed
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * Returns zero on success, or a negative errno if the "voltage-ranges"
+ * property is missing or invalid.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+	const u32 *voltage_ranges;
+	int num_ranges, i;
+
+	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+	if (!voltage_ranges || !num_ranges) {
+		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_ranges; i++) {
+		const int j = i * 2;
+		u32 ocr_mask;
+
+		ocr_mask = mmc_vddrange_to_ocrmask(
+				be32_to_cpu(voltage_ranges[j]),
+				be32_to_cpu(voltage_ranges[j + 1]));
+		if (!ocr_mask) {
+			pr_err("%s: voltage-range #%d is invalid\n",
+				np->full_name, i);
+			return -EINVAL;
+		}
+		*mask |= ocr_mask;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+#endif /* CONFIG_OF */
+
 #ifdef CONFIG_REGULATOR
 
 /**
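
mmc_of_parse_voltage() above turns a "voltage-ranges" DT property - pairs of
<min max> values in millivolts - into an OCR bitmask via
mmc_vddrange_to_ocrmask(). A sketch of a host driver consuming it, assuming a
node carrying something like voltage-ranges = <3300 3300>; for a fixed 3.3V
supply (the helper name and include of <linux/mmc/core.h> for the declaration
are assumptions):

    #include <linux/mmc/core.h>
    #include <linux/of.h>

    static int example_get_ocr(struct device_node *np, u32 *ocr_mask)
    {
            *ocr_mask = 0;
            /* Returns -EINVAL if the property is absent or malformed. */
            return mmc_of_parse_voltage(np, ocr_mask);
    }
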
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6fb6f77..49bc403 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -374,7 +374,7 @@
 			if (!(flags & OF_GPIO_ACTIVE_LOW))
 				gpio_inv_cd = true;
 
-			ret = mmc_gpio_request_cd(host, gpio);
+			ret = mmc_gpio_request_cd(host, gpio, 0);
 			if (ret < 0) {
 				dev_err(host->parent,
 					"Failed to request CD GPIO #%d: %d!\n",
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 837fc73..ef18348 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -531,6 +531,7 @@
 
 	data.sg = &sg;
 	data.sg_len = 1;
+	mmc_set_data_timeout(&data, card);
 	sg_init_one(&sg, data_buf, len);
 	mmc_wait_for_req(host, &mrq);
 	err = 0;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 176d125..5e8823d 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -215,7 +215,7 @@
 static int mmc_read_ssr(struct mmc_card *card)
 {
 	unsigned int au, es, et, eo;
-	int err, i;
+	int err, i, max_au;
 	u32 *ssr;
 
 	if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -239,12 +239,15 @@
 	for (i = 0; i < 16; i++)
 		ssr[i] = be32_to_cpu(ssr[i]);
 
+	/* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
+	max_au = card->scr.sda_spec3 ? 0xF : 0x9;
+
 	/*
 	 * UNSTUFF_BITS only works with four u32s so we have to offset the
 	 * bitfield positions accordingly.
 	 */
 	au = UNSTUFF_BITS(ssr, 428 - 384, 4);
-	if (au > 0 && au <= 9) {
+	if (au > 0 && au <= max_au) {
 		card->ssr.au = 1 << (au + 4);
 		es = UNSTUFF_BITS(ssr, 408 - 384, 16);
 		et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -942,13 +945,13 @@
 	if (!mmc_host_is_spi(host)) {
 		err = mmc_send_relative_addr(host, &card->rca);
 		if (err)
-			return err;
+			goto free_card;
 	}
 
 	if (!oldcard) {
 		err = mmc_sd_get_csd(host, card);
 		if (err)
-			return err;
+			goto free_card;
 
 		mmc_decode_cid(card);
 	}
@@ -959,7 +962,7 @@
 	if (!mmc_host_is_spi(host)) {
 		err = mmc_select_card(card);
 		if (err)
-			return err;
+			goto free_card;
 	}
 
 	err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 3242351..46596b71 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -135,6 +135,7 @@
  * mmc_gpio_request_cd - request a gpio for card-detection
  * @host: mmc host
  * @gpio: gpio number requested
+ * @debounce: debounce time in microseconds
  *
  * As devm_* managed functions are used in mmc_gpio_request_cd(), client
  * drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
@@ -143,9 +144,14 @@
  * switching for card-detection, they are responsible for calling
  * mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
  *
+ * If GPIO debouncing is desired, set the debounce parameter to a non-zero
+ * value. The caller is responsible for ensuring that the GPIO driver associated
+ * with the GPIO supports debouncing; otherwise an error will be returned.
+ *
  * Returns zero on success, else an error.
  */
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+			unsigned int debounce)
 {
 	struct mmc_gpio *ctx;
 	int irq = gpio_to_irq(gpio);
@@ -167,6 +173,12 @@
 		 */
 		return ret;
 
+	if (debounce) {
+		ret = gpio_set_debounce(gpio, debounce);
+		if (ret < 0)
+			return ret;
+	}
+
 	/*
 	 * Even if gpio_to_irq() returns a valid IRQ number, the platform might
 	 * still prefer to poll, e.g., because that IRQ number is already used
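
With the new third argument, callers opt into GPIO-level debouncing by
passing a non-zero time; passing 0 keeps the old behaviour, which is why
every existing caller in this series is updated mechanically with ", 0".
A sketch of a driver actually using the debounce (the GPIO number and the
200us value are hypothetical):

    #include <linux/mmc/host.h>
    #include <linux/mmc/slot-gpio.h>

    static int example_setup_cd(struct mmc_host *mmc, unsigned int gpio)
    {
            /* Fails if the underlying GPIO driver cannot debounce. */
            return mmc_gpio_request_cd(mmc, gpio, 200);
    }
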
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8a4c066..b7fd5ab 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -284,11 +284,11 @@
 
 config MMC_OMAP_HS
 	tristate "TI OMAP High Speed Multimedia Card Interface support"
-	depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	help
 	  This selects the TI OMAP High Speed Multimedia card Interface.
-	  If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
-	  Multimedia Card slot, say Y or M here.
+	  If you have an omap2plus board with a Multimedia Card slot,
+	  say Y or M here.
 
 	  If unsure, say N.
 
@@ -530,7 +530,7 @@
 
 config MMC_DW
 	tristate "Synopsys DesignWare Memory Card Interface"
-	depends on ARM
+	depends on ARC || ARM
 	help
 	  This selects support for the Synopsys DesignWare Mobile Storage IP
 	  block, this provides host support for SD and MMC interfaces, in both
@@ -569,7 +569,7 @@
 
 config MMC_DW_SOCFPGA
 	tristate "SOCFPGA specific extensions for Synopsys DW Memory Card Interface"
-	depends on MMC_DW
+	depends on MMC_DW && MFD_SYSCON
 	select MMC_DW_PLTFM
 	help
 	  This selects support for Altera SoCFPGA specific extensions to the
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index d422e21..c41d0c3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -52,8 +52,6 @@
 
 obj-$(CONFIG_MMC_REALTEK_PCI)	+= rtsx_pci_sdmmc.o
 
-obj-$(CONFIG_MMC_REALTEK_PCI)	+= rtsx_pci_sdmmc.o
-
 obj-$(CONFIG_MMC_SDHCI_PLTFM)		+= sdhci-pltfm.o
 obj-$(CONFIG_MMC_SDHCI_CNS3XXX)		+= sdhci-cns3xxx.o
 obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX)	+= sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bdb84da..69e438e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -378,6 +378,8 @@
 {
 	struct atmel_mci	*host = s->private;
 	u32			*buf;
+	int			ret = 0;
+
 
 	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
 	if (!buf)
@@ -388,12 +390,16 @@
 	 * not disabling interrupts, so IMR and SR may not be
 	 * consistent.
 	 */
+	ret = clk_prepare_enable(host->mck);
+	if (ret)
+		goto out;
+
 	spin_lock_bh(&host->lock);
-	clk_enable(host->mck);
 	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
-	clk_disable(host->mck);
 	spin_unlock_bh(&host->lock);
 
+	clk_disable_unprepare(host->mck);
+
 	seq_printf(s, "MR:\t0x%08x%s%s ",
 			buf[ATMCI_MR / 4],
 			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
@@ -442,9 +448,10 @@
 				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
 	}
 
+out:
 	kfree(buf);
 
-	return 0;
+	return ret;
 }
 
 static int atmci_regs_open(struct inode *inode, struct file *file)
@@ -1262,6 +1269,7 @@
 	struct atmel_mci_slot	*slot = mmc_priv(mmc);
 	struct atmel_mci	*host = slot->host;
 	unsigned int		i;
+	bool			unprepare_clk;
 
 	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
 	switch (ios->bus_width) {
@@ -1277,9 +1285,13 @@
 		unsigned int clock_min = ~0U;
 		u32 clkdiv;
 
+		clk_prepare(host->mck);
+		unprepare_clk = true;
+
 		spin_lock_bh(&host->lock);
 		if (!host->mode_reg) {
 			clk_enable(host->mck);
+			unprepare_clk = false;
 			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
 			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
 			if (host->caps.has_cfg_reg)
@@ -1347,6 +1359,8 @@
 	} else {
 		bool any_slot_active = false;
 
+		unprepare_clk = false;
+
 		spin_lock_bh(&host->lock);
 		slot->clock = 0;
 		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
@@ -1360,12 +1374,16 @@
 			if (host->mode_reg) {
 				atmci_readl(host, ATMCI_MR);
 				clk_disable(host->mck);
+				unprepare_clk = true;
 			}
 			host->mode_reg = 0;
 		}
 		spin_unlock_bh(&host->lock);
 	}
 
+	if (unprepare_clk)
+		clk_unprepare(host->mck);
+
 	switch (ios->power_mode) {
 	case MMC_POWER_UP:
 		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
@@ -2376,10 +2394,12 @@
 	if (!host->regs)
 		goto err_ioremap;
 
-	clk_enable(host->mck);
+	ret = clk_prepare_enable(host->mck);
+	if (ret)
+		goto err_request_irq;
 	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
 	host->bus_hz = clk_get_rate(host->mck);
-	clk_disable(host->mck);
+	clk_disable_unprepare(host->mck);
 
 	host->mapbase = regs->start;
 
@@ -2482,11 +2502,11 @@
 			atmci_cleanup_slot(host->slot[i], i);
 	}
 
-	clk_enable(host->mck);
+	clk_prepare_enable(host->mck);
 	atmci_writel(host, ATMCI_IDR, ~0UL);
 	atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
 	atmci_readl(host, ATMCI_SR);
-	clk_disable(host->mck);
+	clk_disable_unprepare(host->mck);
 
 	if (host->dma.chan)
 		dma_release_channel(host->dma.chan);
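
The atmel-mci conversion follows the common clock framework rule that a clock
must be prepared (a sleepable step) before it may be enabled (an atomic step);
that is why clk_prepare() is hoisted outside the spin_lock_bh() section while
clk_enable() stays inside it. The canonical pairing, as a sketch:

    #include <linux/clk.h>

    static int example_clk_cycle(struct clk *clk)
    {
            int ret;

            ret = clk_prepare_enable(clk);	/* may sleep: not under a spinlock */
            if (ret)
                    return ret;
            /* ... access the peripheral ... */
            clk_disable_unprepare(clk);
            return 0;
    }
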
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 866edef..6a1fa21 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -39,6 +39,7 @@
 	DW_MCI_TYPE_EXYNOS4210,
 	DW_MCI_TYPE_EXYNOS4412,
 	DW_MCI_TYPE_EXYNOS5250,
+	DW_MCI_TYPE_EXYNOS5420,
 };
 
 /* Exynos implementation specific driver private data */
@@ -62,6 +63,9 @@
 	}, {
 		.compatible	= "samsung,exynos5250-dw-mshc",
 		.ctrl_type	= DW_MCI_TYPE_EXYNOS5250,
+	}, {
+		.compatible	= "samsung,exynos5420-dw-mshc",
+		.ctrl_type	= DW_MCI_TYPE_EXYNOS5420,
 	},
 };
 
@@ -90,7 +94,8 @@
 {
 	struct dw_mci_exynos_priv_data *priv = host->priv;
 
-	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250)
+	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 ||
+		priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420)
 		host->bus_hz /= (priv->ciu_div + 1);
 	else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
 		host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
@@ -173,6 +178,8 @@
 			.data = &exynos_drv_data, },
 	{ .compatible = "samsung,exynos5250-dw-mshc",
 			.data = &exynos_drv_data, },
+	{ .compatible = "samsung,exynos5420-dw-mshc",
+			.data = &exynos_drv_data, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index b456b0c..f70546a 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -59,7 +59,9 @@
 	if (ret)
 		return ret;
 
-	host->regs = pcim_iomap_table(pdev)[0];
+	host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO];
+
+	pci_set_master(pdev);
 
 	ret = dw_mci_probe(host);
 	if (ret)
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index ee52556..2089752 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -23,6 +23,7 @@
 #include <linux/of.h>
 
 #include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
 
 static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
 {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 5424073..018f365 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1601,18 +1601,17 @@
 
 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
 
+	/*
+	 * DTO fix - version 2.10a and below, and only if internal DMA
+	 * is configured.
+	 */
+	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+		if (!pending &&
+		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+			pending |= SDMMC_INT_DATA_OVER;
+	}
+
 	if (pending) {
-
-		/*
-		 * DTO fix - version 2.10a and below, and only if internal DMA
-		 * is configured.
-		 */
-		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
-			if (!pending &&
-			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
-				pending |= SDMMC_INT_DATA_OVER;
-		}
-
 		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
 			host->cmd_status = pending;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0308c9f..6651633 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -713,7 +713,7 @@
 		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
 	if (gpio_is_valid(pdata->gpio_card_detect)) {
-		ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect);
+		ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
 		if (ret)
 			return ret;
 	}
@@ -783,9 +783,8 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	host->base = devm_ioremap_resource(&pdev->dev, res);
-	if (!host->base) {
-		ret = -EBUSY;
-		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
+	if (IS_ERR(host->base)) {
+		ret = PTR_ERR(host->base);
 		goto err_free_host;
 	}
 
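
The jz4740 fix reflects devm_ioremap_resource()'s contract: it never returns
NULL, it returns an ERR_PTR() encoding the precise errno (and already prints
an error message), so the old !host->base check could never fire. A sketch of
the correct shape (function name hypothetical):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_map(struct platform_device *pdev, void __iomem **base)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            *base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(*base))
                    return PTR_ERR(*base);	/* never NULL on failure */
            return 0;
    }
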
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74145d1..0a87e56 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -36,6 +36,7 @@
 
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */
+#include <linux/mmc/slot-gpio.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/mmc_spi.h>
@@ -1272,33 +1273,11 @@
 	}
 }
 
-static int mmc_spi_get_ro(struct mmc_host *mmc)
-{
-	struct mmc_spi_host *host = mmc_priv(mmc);
-
-	if (host->pdata && host->pdata->get_ro)
-		return !!host->pdata->get_ro(mmc->parent);
-	/*
-	 * Board doesn't support read only detection; let the mmc core
-	 * decide what to do.
-	 */
-	return -ENOSYS;
-}
-
-static int mmc_spi_get_cd(struct mmc_host *mmc)
-{
-	struct mmc_spi_host *host = mmc_priv(mmc);
-
-	if (host->pdata && host->pdata->get_cd)
-		return !!host->pdata->get_cd(mmc->parent);
-	return -ENOSYS;
-}
-
 static const struct mmc_host_ops mmc_spi_ops = {
 	.request	= mmc_spi_request,
 	.set_ios	= mmc_spi_set_ios,
-	.get_ro		= mmc_spi_get_ro,
-	.get_cd		= mmc_spi_get_cd,
+	.get_ro		= mmc_gpio_get_ro,
+	.get_cd		= mmc_gpio_get_cd,
 };
 
 
@@ -1324,6 +1303,7 @@
 	struct mmc_host		*mmc;
 	struct mmc_spi_host	*host;
 	int			status;
+	bool			has_ro = false;
 
 	/* We rely on full duplex transfers, mostly to reduce
 	 * per-transfer overheads (by making fewer transfers).
@@ -1448,18 +1428,33 @@
 	}
 
 	/* pass platform capabilities, if any */
-	if (host->pdata)
+	if (host->pdata) {
 		mmc->caps |= host->pdata->caps;
+		mmc->caps2 |= host->pdata->caps2;
+	}
 
 	status = mmc_add_host(mmc);
 	if (status != 0)
 		goto fail_add_host;
 
+	if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
+		status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
+					     host->pdata->cd_debounce);
+		if (status != 0)
+			goto fail_add_host;
+	}
+
+	if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+		has_ro = true;
+		status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
+		if (status != 0)
+			goto fail_add_host;
+	}
+
 	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
 			dev_name(&mmc->class_dev),
 			host->dma_dev ? "" : ", no DMA",
-			(host->pdata && host->pdata->get_ro)
-				? "" : ", no WP",
+			has_ro ? "" : ", no WP",
 			(host->pdata && host->pdata->setpower)
 				? "" : ", no poweroff",
 			(mmc->caps & MMC_CAP_NEEDS_POLL)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 4ddd83f..06c5b0b 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -757,7 +757,8 @@
 		if (mvsd_data->gpio_card_detect &&
 		    gpio_is_valid(mvsd_data->gpio_card_detect)) {
 			ret = mmc_gpio_request_cd(mmc,
-						  mvsd_data->gpio_card_detect);
+						  mvsd_data->gpio_card_detect,
+						  0);
 			if (ret)
 				goto out;
 		} else {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index f38d75f..e1fa3ef 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -102,12 +102,15 @@
 		  BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
 }
 
-static void mxs_mmc_reset(struct mxs_mmc_host *host)
+static int mxs_mmc_reset(struct mxs_mmc_host *host)
 {
 	struct mxs_ssp *ssp = &host->ssp;
 	u32 ctrl0, ctrl1;
+	int ret;
 
-	stmp_reset_block(ssp->base);
+	ret = stmp_reset_block(ssp->base);
+	if (ret)
+		return ret;
 
 	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
 	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -132,6 +135,7 @@
 
 	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
 	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
+	return 0;
 }
 
 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -618,21 +622,25 @@
 		}
 	}
 
-	ssp->clk = clk_get(&pdev->dev, NULL);
+	ssp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(ssp->clk)) {
 		ret = PTR_ERR(ssp->clk);
 		goto out_mmc_free;
 	}
 	clk_prepare_enable(ssp->clk);
 
-	mxs_mmc_reset(host);
+	ret = mxs_mmc_reset(host);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
+		goto out_clk_disable;
+	}
 
 	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
 	if (!ssp->dmach) {
 		dev_err(mmc_dev(host->mmc),
 			"%s: failed to request dma\n", __func__);
 		ret = -ENODEV;
-		goto out_clk_put;
+		goto out_clk_disable;
 	}
 
 	/* set mmc core parameters */
@@ -685,9 +693,8 @@
 out_free_dma:
 	if (ssp->dmach)
 		dma_release_channel(ssp->dmach);
-out_clk_put:
+out_clk_disable:
 	clk_disable_unprepare(ssp->clk);
-	clk_put(ssp->clk);
 out_mmc_free:
 	mmc_free_host(mmc);
 	return ret;
@@ -705,7 +712,6 @@
 		dma_release_channel(ssp->dmach);
 
 	clk_disable_unprepare(ssp->clk);
-	clk_put(ssp->clk);
 
 	mmc_free_host(mmc);
 
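
Switching mxs-mmc to devm_clk_get() ties the clock reference to the device's
lifetime, which is what lets the patch delete both clk_put() calls and rename
the out_clk_put label to out_clk_disable (only the enable state still needs
manual unwinding). A sketch of the pattern (function name hypothetical):

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int example_clk_init(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);
            /* No clk_put() needed: released automatically on unbind. */
            return clk_prepare_enable(clk);
    }
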
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index d720b5e..6e218fb 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -50,25 +50,6 @@
 	return container_of(dev->platform_data, struct of_mmc_spi, pdata);
 }
 
-static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num)
-{
-	struct of_mmc_spi *oms = to_of_mmc_spi(dev);
-	bool active_low = oms->alow_gpios[gpio_num];
-	bool value = gpio_get_value(oms->gpios[gpio_num]);
-
-	return active_low ^ value;
-}
-
-static int of_mmc_spi_get_cd(struct device *dev)
-{
-	return of_mmc_spi_read_gpio(dev, CD_GPIO);
-}
-
-static int of_mmc_spi_get_ro(struct device *dev)
-{
-	return of_mmc_spi_read_gpio(dev, WP_GPIO);
-}
-
 static int of_mmc_spi_init(struct device *dev,
 			   irqreturn_t (*irqhandler)(int, void *), void *mmc)
 {
@@ -130,20 +111,22 @@
 		if (!gpio_is_valid(oms->gpios[i]))
 			continue;
 
-		ret = gpio_request(oms->gpios[i], dev_name(dev));
-		if (ret < 0) {
-			oms->gpios[i] = -EINVAL;
-			continue;
-		}
-
 		if (gpio_flags & OF_GPIO_ACTIVE_LOW)
 			oms->alow_gpios[i] = true;
 	}
 
-	if (gpio_is_valid(oms->gpios[CD_GPIO]))
-		oms->pdata.get_cd = of_mmc_spi_get_cd;
-	if (gpio_is_valid(oms->gpios[WP_GPIO]))
-		oms->pdata.get_ro = of_mmc_spi_get_ro;
+	if (gpio_is_valid(oms->gpios[CD_GPIO])) {
+		oms->pdata.cd_gpio = oms->gpios[CD_GPIO];
+		oms->pdata.flags |= MMC_SPI_USE_CD_GPIO;
+		if (!oms->alow_gpios[CD_GPIO])
+			oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+	}
+	if (gpio_is_valid(oms->gpios[WP_GPIO])) {
+		oms->pdata.ro_gpio = oms->gpios[WP_GPIO];
+		oms->pdata.flags |= MMC_SPI_USE_RO_GPIO;
+		if (!oms->alow_gpios[WP_GPIO])
+			oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+	}
 
 	oms->detect_irq = irq_of_parse_and_map(np, 0);
 	if (oms->detect_irq != 0) {
@@ -166,15 +149,10 @@
 	struct device *dev = &spi->dev;
 	struct device_node *np = dev->of_node;
 	struct of_mmc_spi *oms = to_of_mmc_spi(dev);
-	int i;
 
 	if (!dev->platform_data || !np)
 		return;
 
-	for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
-		if (gpio_is_valid(oms->gpios[i]))
-			gpio_free(oms->gpios[i]);
-	}
 	kfree(oms);
 	dev->platform_data = NULL;
 }
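
After this pair of patches, boards describe the card-detect and read-only
lines declaratively through mmc_spi platform data instead of supplying
get_cd()/get_ro() callbacks; the core slot-gpio helpers then handle polarity,
with active-high lines expressed via caps2. A sketch of board code under the
new scheme (GPIO numbers are hypothetical):

    #include <linux/mmc/host.h>
    #include <linux/spi/mmc_spi.h>

    static struct mmc_spi_platform_data example_pdata = {
            .flags       = MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
            .cd_gpio     = 10,
            .ro_gpio     = 11,
            .cd_debounce = 0,	/* no GPIO debouncing */
            .caps2       = MMC_CAP2_RO_ACTIVE_HIGH,	/* WP line is active-high */
    };
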
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1865321..6ac63df 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -21,6 +21,7 @@
 #include <linux/debugfs.h>
 #include <linux/dmaengine.h>
 #include <linux/seq_file.h>
+#include <linux/sizes.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -1041,6 +1042,7 @@
 		}
 	}
 
+	OMAP_HSMMC_WRITE(host->base, STAT, status);
 	if (end_cmd || ((status & CC_EN) && host->cmd))
 		omap_hsmmc_cmd_done(host, host->cmd);
 	if ((end_trans || (status & TC_EN)) && host->mrq)
@@ -1060,7 +1062,6 @@
 		omap_hsmmc_do_irq(host, status);
 
 		/* Flush posted write */
-		OMAP_HSMMC_WRITE(host->base, STAT, status);
 		status = OMAP_HSMMC_READ(host->base, STAT);
 	}
 
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 0584a1c..36fa2df 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -119,7 +119,7 @@
 	return byte;
 }
 
-unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
 {
 	return MIN_FREQ;
 }
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1dd5ba8..abc8cf0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -616,7 +616,7 @@
 	/* card_detect */
 	switch (boarddata->cd_type) {
 	case ESDHC_CD_GPIO:
-		err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
+		err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
 		if (err) {
 			dev_err(mmc_dev(host->mmc),
 				"failed to request card-detect gpio!\n");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 15039e2..e328252 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -316,6 +316,7 @@
 
 	/* call to generic mmc_of_parse to support additional capabilities */
 	mmc_of_parse(host->mmc);
+	mmc_of_parse_voltage(np, &host->ocr_mask);
 
 	ret = sdhci_add_host(host);
 	if (ret)
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index bf99359..793dacd 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -278,7 +278,8 @@
 			host->mmc->pm_caps |= pdata->pm_caps;
 
 		if (gpio_is_valid(pdata->ext_cd_gpio)) {
-			ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
+			ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
+						  0);
 			if (ret) {
 				dev_err(mmc_dev(host->mmc),
 					"failed to allocate card detect gpio\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 926aaf6..6debda9 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -296,9 +296,12 @@
 	unsigned long timeout;
 	u16 clk = 0;
 
-	/* don't bother if the clock is going off */
-	if (clock == 0)
+	/* If the clock is going off, write 0 to the clock control register */
+	if (clock == 0) {
+		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+		host->clock = clock;
 		return;
+	}
 
 	sdhci_s3c_set_clock(host, clock);
 
@@ -608,6 +611,7 @@
 	host->hw_name = "samsung-hsmmc";
 	host->ops = &sdhci_s3c_ops;
 	host->quirks = 0;
+	host->quirks2 = 0;
 	host->irq = irq;
 
 	/* Setup quirks for the controller */
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 62a4a83..696122c 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -84,7 +84,7 @@
 	 * gets setup in sdhci_add_host() and we oops.
 	 */
 	if (gpio_is_valid(priv->gpio_cd)) {
-		ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd);
+		ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0);
 		if (ret) {
 			dev_err(&pdev->dev, "card detect irq request failed: %d\n",
 				ret);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index dd2c083..7a7fb4f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3119,6 +3119,9 @@
 				   SDHCI_MAX_CURRENT_MULTIPLIER;
 	}
 
+	if (host->ocr_mask)
+		ocr_avail = host->ocr_mask;
+
 	mmc->ocr_avail = ocr_avail;
 	mmc->ocr_avail_sdio = ocr_avail;
 	if (host->ocr_avail_sdio)
@@ -3213,6 +3216,8 @@
 		host->tuning_timer.function = sdhci_tuning_timer;
 	}
 
+	sdhci_init(host, 0);
+
 	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
 		mmc_hostname(mmc), host);
 	if (ret) {
@@ -3221,8 +3226,6 @@
 		goto untasklet;
 	}
 
-	sdhci_init(host, 0);
-
 #ifdef CONFIG_MMC_DEBUG
 	sdhci_dumpregs(host);
 #endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6706b5e..36629a0 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -61,6 +61,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
 
@@ -133,6 +134,8 @@
 				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
 				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
 
+#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)
+
 /* CE_INT_MASK */
 #define MASK_ALL		0x00000000
 #define MASK_MCCSDE		(1 << 29)
@@ -161,7 +164,7 @@
 
 #define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
 				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
-				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+				 MASK_MCRCSTO | MASK_MWDATTO | \
 				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
 
 #define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
@@ -243,6 +246,8 @@
 	int sg_blkidx;
 	bool power;
 	bool card_present;
+	bool ccs_enable;		/* Command Completion Signal support */
+	bool clk_ctrl2_enable;
 	struct mutex thread_lock;
 
 	/* DMA support */
@@ -386,25 +391,29 @@
 
 	host->dma_active = false;
 
-	if (!pdata)
+	if (pdata) {
+		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+			return;
+	} else if (!host->pd->dev.of_node) {
 		return;
-
-	if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
-		return;
+	}
 
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
-					    (void *)pdata->slave_id_tx);
+	host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+				pdata ? (void *)pdata->slave_id_tx : NULL,
+				&host->pd->dev, "tx");
 	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
 		host->chan_tx);
 
 	if (!host->chan_tx)
 		return;
 
-	cfg.slave_id = pdata->slave_id_tx;
+	/* In the OF case the driver will get the slave ID from the DT */
+	if (pdata)
+		cfg.slave_id = pdata->slave_id_tx;
 	cfg.direction = DMA_MEM_TO_DEV;
 	cfg.dst_addr = res->start + MMCIF_CE_DATA;
 	cfg.src_addr = 0;
@@ -412,15 +421,17 @@
 	if (ret < 0)
 		goto ecfgtx;
 
-	host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
-					    (void *)pdata->slave_id_rx);
+	host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+				pdata ? (void *)pdata->slave_id_rx : NULL,
+				&host->pd->dev, "rx");
 	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
 		host->chan_rx);
 
 	if (!host->chan_rx)
 		goto erqrx;
 
-	cfg.slave_id = pdata->slave_id_rx;
+	if (pdata)
+		cfg.slave_id = pdata->slave_id_rx;
 	cfg.direction = DMA_DEV_TO_MEM;
 	cfg.dst_addr = 0;
 	cfg.src_addr = res->start + MMCIF_CE_DATA;
@@ -485,8 +496,12 @@
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
 	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
+	if (host->ccs_enable)
+		tmp |= SCCSTO_29;
+	if (host->clk_ctrl2_enable)
+		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
 	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
-		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
 	/* byte swap on */
 	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
 }
@@ -866,6 +881,9 @@
 		break;
 	}
 
+	if (host->ccs_enable)
+		mask |= MASK_MCCSTO;
+
 	if (mrq->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
@@ -873,7 +891,10 @@
 	}
 	opc = sh_mmcif_set_cmd(host, mrq);
 
-	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+	if (host->ccs_enable)
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+	else
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
 	/* set arg */
 	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
@@ -956,11 +977,8 @@
 
 static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
 {
-	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
 	struct mmc_host *mmc = host->mmc;
 
-	if (pd && pd->set_pwr)
-		pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
 	if (!IS_ERR(mmc->supply.vmmc))
 		/* Errors ignored... */
 		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
@@ -1241,11 +1259,14 @@
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
-	u32 state;
+	u32 state, mask;
 
 	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
-	sh_mmcif_writel(host->addr, MMCIF_CE_INT,
-			~(state & sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK)));
+	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
+	if (host->ccs_enable)
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
+	else
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
 	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
 
 	if (state & ~MASK_CLEAN)
@@ -1379,6 +1400,8 @@
 	host->mmc	= mmc;
 	host->addr	= reg;
 	host->timeout	= msecs_to_jiffies(1000);
+	host->ccs_enable = !pd || !pd->ccs_unsupported;
+	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
 
 	host->pd = pdev;
 
@@ -1436,7 +1459,7 @@
 	}
 
 	if (pd && pd->use_cd_gpio) {
-		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
+		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
 		if (ret < 0)
 			goto erqcd;
 	}
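
dma_request_slave_channel_compat() bridges the two probing worlds sh_mmcif now
supports: on DT systems it resolves the channel from the node's
"dmas"/"dma-names" properties, otherwise it falls back to the filter-function
lookup driven by the platform-data slave IDs. A sketch of the call pattern
(the filter argument is hypothetical; on DT systems it is simply NULL):

    #include <linux/dmaengine.h>
    #include <linux/sh_dma.h>

    static struct dma_chan *example_request_tx(struct device *dev, void *arg)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            /* arg is the legacy slave ID, or NULL on DT systems */
            return dma_request_slave_channel_compat(mask, shdma_chan_filter,
                                                    arg, dev, "tx");
    }
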
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index ebea749..87ed3fb 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -70,20 +70,6 @@
 	clk_disable(priv->clk);
 }
 
-static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
-{
-	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
-	p->set_pwr(pdev, state);
-}
-
-static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
-{
-	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
-	return p->get_cd(pdev);
-}
-
 static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
 {
 	int timeout = 1000;
@@ -129,7 +115,12 @@
 static const struct of_device_id sh_mobile_sdhi_of_match[] = {
 	{ .compatible = "renesas,shmobile-sdhi" },
 	{ .compatible = "renesas,sh7372-sdhi" },
+	{ .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+	{ .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
 	{ .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+	{ .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+	{ .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+	{ .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
 	{},
 };
 MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -180,10 +171,6 @@
 		mmc_data->capabilities |= p->tmio_caps;
 		mmc_data->capabilities2 |= p->tmio_caps2;
 		mmc_data->cd_gpio = p->cd_gpio;
-		if (p->set_pwr)
-			mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
-		if (p->get_cd)
-			mmc_data->get_cd = sh_mobile_sdhi_get_cd;
 
 		if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
 			/*
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 47bdb8f..65edb4a 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -104,6 +104,7 @@
 pio:
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
+		tmio_mmc_enable_dma(host, false);
 		if (ret >= 0)
 			ret = -EIO;
 		host->chan_rx = NULL;
@@ -116,7 +117,6 @@
 		}
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
-		tmio_mmc_enable_dma(host, false);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
@@ -185,6 +185,7 @@
 pio:
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
+		tmio_mmc_enable_dma(host, false);
 		if (ret >= 0)
 			ret = -EIO;
 		host->chan_tx = NULL;
@@ -197,7 +198,6 @@
 		}
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
-		tmio_mmc_enable_dma(host, false);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index b72edb7..b380225 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -795,9 +795,13 @@
 	 * omap_hsmmc.c driver does.
 	 */
 	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
-		regulator_enable(mmc->supply.vqmmc);
+		ret = regulator_enable(mmc->supply.vqmmc);
 		udelay(200);
 	}
+
+	if (ret < 0)
+		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
+			ret);
 }
 
 static void tmio_mmc_power_off(struct tmio_mmc_host *host)
@@ -932,25 +936,11 @@
 		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
 }
 
-static int tmio_mmc_get_cd(struct mmc_host *mmc)
-{
-	struct tmio_mmc_host *host = mmc_priv(mmc);
-	struct tmio_mmc_data *pdata = host->pdata;
-	int ret = mmc_gpio_get_cd(mmc);
-	if (ret >= 0)
-		return ret;
-
-	if (!pdata->get_cd)
-		return -ENOSYS;
-	else
-		return pdata->get_cd(host->pdev);
-}
-
 static const struct mmc_host_ops tmio_mmc_ops = {
 	.request	= tmio_mmc_request,
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro         = tmio_mmc_get_ro,
-	.get_cd		= tmio_mmc_get_cd,
+	.get_cd		= mmc_gpio_get_cd,
 	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
 };
 
@@ -1106,7 +1096,7 @@
 	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
 
 	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
-		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
+		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
 		if (ret < 0) {
 			tmio_mmc_host_remove(_host);
 			return ret;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cb9f361..e9028ad 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2079,7 +2079,7 @@
 	kref_put(&vub300->kref, vub300_delete);
 }
 
-void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
 {				/* NOT irq */
 	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
 	dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 91f179d..f428ef57 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1472,7 +1472,7 @@
 	bond_info->lp_counter++;
 
 	/* send learning packets */
-	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
+	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
 		/* change of curr_active_slave involves swapping of mac addresses.
 		 * in order to avoid this swapping from happening while
 		 * sending the learning packets, the curr_slave_lock must be held for
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 28d8e4c..c5eff5d 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -36,14 +36,15 @@
 					 * Used for division - never set
 					 * to zero !!!
 					 */
-#define BOND_ALB_LP_INTERVAL	    1	/* In seconds, periodic send of
-					 * learning packets to the switch
-					 */
+#define BOND_ALB_DEFAULT_LP_INTERVAL 1
+#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval)	/* In seconds, periodic send of
+								 * learning packets to the switch
+								 */
 
 #define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
 				  * ALB_TIMER_TICKS_PER_SEC)
 
-#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
+#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
 			   * ALB_TIMER_TICKS_PER_SEC)
 
 #define TLB_HASH_TABLE_SIZE 256	/* The size of the clients hash table.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 39e5b1c..55bbb8b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2404,8 +2404,8 @@
 	slave->target_last_arp_rx[i] = jiffies;
 }
 
-static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
-			struct slave *slave)
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+		 struct slave *slave)
 {
 	struct arphdr *arp = (struct arphdr *)skb->data;
 	unsigned char *arp_ptr;
@@ -4416,6 +4416,7 @@
 	params->all_slaves_active = all_slaves_active;
 	params->resend_igmp = resend_igmp;
 	params->min_links = min_links;
+	params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
 
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ce46776..c29b836 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -349,6 +349,8 @@
 		goto out;
 	}
 
+	/* don't cache arp_validate between modes */
+	bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
 	bond->params.mode = new_value;
 	bond_set_mode_ops(bond, bond->params.mode);
 	pr_info("%s: setting mode to %s (%d).\n",
@@ -419,27 +421,39 @@
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
-	int new_value;
 	struct bonding *bond = to_bond(d);
+	int new_value, ret = count;
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	new_value = bond_parse_parm(buf, arp_validate_tbl);
 	if (new_value < 0) {
 		pr_err("%s: Ignoring invalid arp_validate value %s\n",
 		       bond->dev->name, buf);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
-	if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+	if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 		pr_err("%s: arp_validate only supported in active-backup mode.\n",
 		       bond->dev->name);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	pr_info("%s: setting arp_validate to %s (%d).\n",
 		bond->dev->name, arp_validate_tbl[new_value].modename,
 		new_value);
 
+	if (bond->dev->flags & IFF_UP) {
+		if (!new_value)
+			bond->recv_probe = NULL;
+		else if (bond->params.arp_interval)
+			bond->recv_probe = bond_arp_rcv;
+	}
 	bond->params.arp_validate = new_value;
+out:
+	rtnl_unlock();
 
-	return count;
+	return ret;
 }
 
 static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
@@ -555,8 +569,8 @@
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int new_value, ret = count;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
@@ -599,8 +613,13 @@
 		 * is called.
 		 */
 		if (!new_value) {
+			if (bond->params.arp_validate)
+				bond->recv_probe = NULL;
 			cancel_delayed_work_sync(&bond->arp_work);
 		} else {
+			/* arp_validate can be set only in active-backup mode */
+			if (bond->params.arp_validate)
+				bond->recv_probe = bond_arp_rcv;
 			cancel_delayed_work_sync(&bond->mii_work);
 			queue_delayed_work(bond->wq, &bond->arp_work, 0);
 		}
@@ -1680,6 +1699,44 @@
 static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
 		   bonding_show_resend_igmp, bonding_store_resend_igmp);
 
+
+static ssize_t bonding_show_lp_interval(struct device *d,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct bonding *bond = to_bond(d);
+	return sprintf(buf, "%d\n", bond->params.lp_interval);
+}
+
+static ssize_t bonding_store_lp_interval(struct device *d,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct bonding *bond = to_bond(d);
+	int new_value, ret = count;
+
+	if (sscanf(buf, "%d", &new_value) != 1) {
+		pr_err("%s: no lp_interval value specified.\n",
+			bond->dev->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (new_value <= 0) {
+		pr_err("%s: lp_interval must be between 1 and %d\n",
+			bond->dev->name, INT_MAX);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	bond->params.lp_interval = new_value;
+out:
+	return ret;
+}
+
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+		   bonding_show_lp_interval, bonding_store_lp_interval);
+
 static struct attribute *per_bond_attrs[] = {
 	&dev_attr_slaves.attr,
 	&dev_attr_mode.attr,
@@ -1710,6 +1767,7 @@
 	&dev_attr_all_slaves_active.attr,
 	&dev_attr_resend_igmp.attr,
 	&dev_attr_min_links.attr,
+	&dev_attr_lp_interval.attr,
 	NULL,
 };
 
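
The arp_validate store handler adopts the locking idiom already used by the
other bonding sysfs stores: take RTNL with rtnl_trylock() and bounce the
syscall if the lock is contended, because sleeping on rtnl_lock() here can
deadlock against a concurrent device unregister that is tearing down the same
sysfs file. A sketch of the idiom (the store function is hypothetical):

    #include <linux/rtnetlink.h>
    #include <linux/sched.h>

    static ssize_t example_store(const char *buf, size_t count)
    {
            if (!rtnl_trylock())
                    return restart_syscall();	/* retried from userspace */
            /* ... update state that requires RTNL ... */
            rtnl_unlock();
            return count;
    }
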
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f7ab161..03cf3fd 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -176,6 +176,7 @@
 	int tx_queues;
 	int all_slaves_active;
 	int resend_igmp;
+	int lp_interval;
 };
 
 struct bond_parm_tbl {
@@ -430,6 +431,7 @@
 
 struct bond_net;
 
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e66684a..75fb1d2 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -530,7 +530,7 @@
 	if (lp->wol && !lp->irq_wake_requested) {
 		/* register wake irq handler */
 		rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
-				 IRQF_DISABLED, "EMAC_WAKE", dev);
+				 0, "EMAC_WAKE", dev);
 		if (rc)
 			return rc;
 		lp->irq_wake_requested = true;
@@ -1686,7 +1686,7 @@
 	/* now, enable interrupts */
 	/* register irq handler */
 	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
-			IRQF_DISABLED, "EMAC_RX", ndev);
+			0, "EMAC_RX", ndev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
 		rc = -EBUSY;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index d6b2029..3d8c6b2 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -358,7 +358,7 @@
 
 	REGA(CSR0) = CSR0_STOP;
 
-	if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
+	if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
 #ifdef CONFIG_SUN3
 		iounmap((void __iomem *)ioaddr);
 #endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 027398e..fc95b23 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1188,7 +1188,7 @@
 	struct alx_priv *alx;
 	struct alx_hw *hw;
 	bool phy_configured;
-	int bars, pm_cap, err;
+	int bars, err;
 
 	err = pci_enable_device_mem(pdev);
 	if (err)
@@ -1225,18 +1225,13 @@
 	pci_enable_pcie_error_reporting(pdev);
 	pci_set_master(pdev);
 
-	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pm_cap == 0) {
+	if (!pdev->pm_cap) {
 		dev_err(&pdev->dev,
 			"Can't find power management capability, aborting\n");
 		err = -EIO;
 		goto out_pci_release;
 	}
 
-	err = pci_set_power_state(pdev, PCI_D0);
-	if (err)
-		goto out_pci_release;
-
 	netdev = alloc_etherdev(sizeof(*alx));
 	if (!netdev) {
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 8ac48fb..b9a5fb6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -926,13 +926,13 @@
 	if (ret)
 		goto out_phy_disconnect;
 
-	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
 			  dev->name, dev);
 	if (ret)
 		goto out_freeirq;
 
 	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
-			  IRQF_DISABLED, dev->name, dev);
+			  0, dev->name, dev);
 	if (ret)
 		goto out_freeirq_rx;
 
@@ -2156,13 +2156,13 @@
 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
 	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
-			  IRQF_DISABLED, dev->name, dev);
+			  0, dev->name, dev);
 	if (ret)
 		goto out_freeirq;
 
 	if (priv->irq_tx != -1) {
 		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
-				  IRQF_DISABLED, dev->name, dev);
+				  0, dev->name, dev);
 		if (ret)
 			goto out_freeirq_rx;
 	}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af4..249468f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@
 	if (++ring->end >= BGMAC_TX_RING_SLOTS)
 		ring->end = 0;
 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+		    ring->index_base +
 		    ring->end * sizeof(struct bgmac_dma_desc));
 
 	/* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@
 	/* The last slot that hardware didn't consume yet */
 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+	empty_slot -= ring->index_base;
+	empty_slot &= BGMAC_DMA_TX_STATDPTR;
 	empty_slot /= sizeof(struct bgmac_dma_desc);
 
 	while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@
 
 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
 	end_slot &= BGMAC_DMA_RX_STATDPTR;
+	end_slot -= ring->index_base;
+	end_slot &= BGMAC_DMA_RX_STATDPTR;
 	end_slot /= sizeof(struct bgmac_dma_desc);
 
 	ring->end = end_slot;
@@ -418,9 +423,6 @@
 		ring = &bgmac->tx_ring[i];
 		ring->num_slots = BGMAC_TX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
-			bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_TX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* No need to alloc TX slots yet */
 	}
 
@@ -444,9 +453,6 @@
 		ring = &bgmac->rx_ring[i];
 		ring->num_slots = BGMAC_RX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
-			bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_RX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* Alloc RX slots */
 		for (j = 0; j < ring->num_slots; j++) {
 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
 		ring = &bgmac->tx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_tx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 
 		ring->start = 0;
 		ring->end = 0;	/* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@
 
 		ring = &bgmac->rx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_rx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 
 		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
 		     j++, dma_desc++) {
@@ -531,6 +548,7 @@
 		}
 
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+			    ring->index_base +
 			    ring->num_slots * sizeof(struct bgmac_dma_desc));
 
 		ring->start = 0;
@@ -908,10 +926,10 @@
 		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
 		u8 et_swtype = 0;
 		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
-			     BGMAC_CHIPCTL_1_IF_TYPE_RMII;
-		char buf[2];
+			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
+		char buf[4];
 
-		if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
+		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
 			if (kstrtou8(buf, 0, &et_swtype))
 				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
 					  buf);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5f..66c8afb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
 
 #define BGMAC_CHIPCTL_1_IF_TYPE_MASK		0x00000030
 #define BGMAC_CHIPCTL_1_IF_TYPE_RMII		0x00000000
-#define BGMAC_CHIPCTL_1_IF_TYPE_MI		0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII		0x00000010
 #define BGMAC_CHIPCTL_1_IF_TYPE_RGMII		0x00000020
 #define BGMAC_CHIPCTL_1_SW_TYPE_MASK		0x000000C0
 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHY		0x00000000
@@ -384,6 +384,8 @@
 	u16 mmio_base;
 	struct bgmac_dma_desc *cpu_base;
 	dma_addr_t dma_base;
+	u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+	bool unaligned;
 
 	struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
 };
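
index_base is the key to the unaligned-ring support: when a ring's descriptor
array is not naturally aligned, the hardware's index registers count from the
ring's DMA base address rather than from zero, so every index written to or
read from the hardware must be offset by the low 32 bits of dma_base. A sketch
of the two conversions used in the bgmac.c changes above (helper names are
hypothetical; definitions come from bgmac.h):

    #include "bgmac.h"

    /* slot number -> value programmed into the INDEX register */
    static u32 example_slot_to_hw(struct bgmac_dma_ring *ring, u32 slot)
    {
            return ring->index_base + slot * sizeof(struct bgmac_dma_desc);
    }

    /* TX STATUS register pointer field -> slot number */
    static u32 example_hw_to_slot(struct bgmac_dma_ring *ring, u32 stat)
    {
            stat -= ring->index_base;
            stat &= BGMAC_DMA_TX_STATDPTR;	/* wrap within the pointer field */
            return stat / sizeof(struct bgmac_dma_desc);
    }
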
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0c33802..70b6a05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1542,7 +1542,6 @@
 	 */
 	bool			fcoe_init;
 
-	int			pm_cap;
 	int			mrrs;
 
 	struct delayed_work	sp_task;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2361bf2..61726af 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -490,10 +490,10 @@
 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 }
 
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-			      struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			      u16 index, gfp_t gfp_mask)
 {
-	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 	dma_addr_t mapping;
@@ -572,7 +572,7 @@
 
 		/* If we fail to allocate a substitute page, we simply stop
 		   where we are and drop the whole packet */
-		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
 		if (unlikely(err)) {
 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 			return err;
@@ -616,12 +616,17 @@
 		kfree(data);
 }
 
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 {
-	if (fp->rx_frag_size)
-		return netdev_alloc_frag(fp->rx_frag_size);
+	if (fp->rx_frag_size) {
+		/* GFP_KERNEL allocations are used only during initialization */
+		if (unlikely(gfp_mask & __GFP_WAIT))
+			return (void *)__get_free_page(gfp_mask);
 
-	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+		return netdev_alloc_frag(fp->rx_frag_size);
+	}
+
+	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 }
 
 #ifdef CONFIG_INET
@@ -701,7 +706,7 @@
 		goto drop;
 
 	/* Try to allocate the new data */
-	new_data = bnx2x_frag_alloc(fp);
+	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
@@ -752,15 +757,15 @@
 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 }
 
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
-			       struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       u16 index, gfp_t gfp_mask)
 {
 	u8 *data;
 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	data = bnx2x_frag_alloc(fp);
+	data = bnx2x_frag_alloc(fp, gfp_mask);
 	if (unlikely(data == NULL))
 		return -ENOMEM;
 
@@ -953,7 +958,8 @@
 			memcpy(skb->data, data + pad, len);
 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 		} else {
-			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+						       GFP_ATOMIC) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
 						 dma_unmap_addr(rx_buf, mapping),
 						 fp->rx_buf_size,
@@ -1313,7 +1319,8 @@
 				struct sw_rx_bd *first_buf =
 					&tpa_info->first_buf;
 
-				first_buf->data = bnx2x_frag_alloc(fp);
+				first_buf->data =
+					bnx2x_frag_alloc(fp, GFP_KERNEL);
 				if (!first_buf->data) {
 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
 						  j);
@@ -1335,7 +1342,8 @@
 			for (i = 0, ring_prod = 0;
 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
 
-				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+						       GFP_KERNEL) < 0) {
 					BNX2X_ERR("was only able to allocate %d rx sges\n",
 						  i);
 					BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -3000,16 +3008,16 @@
 	u16 pmcsr;
 
 	/* If there is no power capability, silently succeed */
-	if (!bp->pm_cap) {
+	if (!bp->pdev->pm_cap) {
 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
 		return 0;
 	}
 
-	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
 
 	switch (state) {
 	case PCI_D0:
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
 				       PCI_PM_CTRL_PME_STATUS));
 
@@ -3033,7 +3041,7 @@
 		if (bp->wol)
 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
 
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
 				      pmcsr);
 
 		/* No more memory access after this point until
@@ -4221,7 +4229,7 @@
 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
 	 */
 	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
 			failure_cnt++;
 			continue;
 		}
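
The bnx2x hunks above thread a gfp_t through the RX allocation helpers so the
initial ring fill may sleep (GFP_KERNEL) while the data path stays GFP_ATOMIC;
netdev_alloc_frag() assumes atomic context, hence the __GFP_WAIT fallback to a
plain page allocation. A sketch of the pattern, assuming the caller passes
either GFP_ATOMIC or GFP_KERNEL (the helper name is illustrative):

static void *rx_frag_alloc(unsigned int fragsz, gfp_t gfp_mask)
{
	/* __GFP_WAIT means the caller may sleep; netdev_alloc_frag()
	 * may not, so sleeping callers get a whole page instead
	 */
	if (unlikely(gfp_mask & __GFP_WAIT))
		return (void *)__get_free_page(gfp_mask);

	return netdev_alloc_frag(fragsz);
}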
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2612e3c..324de5f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1387,9 +1387,9 @@
 	u16 pm = 0;
 	struct net_device *dev = pci_get_drvdata(bp->pdev);
 
-	if (bp->pm_cap)
+	if (bp->pdev->pm_cap)
 		rc = pci_read_config_word(bp->pdev,
-					  bp->pm_cap + PCI_PM_CTRL, &pm);
+					  bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
 
 	if ((rc && !netif_running(dev)) ||
 	    (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f8dbbb..62c59ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8652,6 +8652,7 @@
 	else if (bp->wol) {
 		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 		u8 *mac_addr = bp->dev->dev_addr;
+		struct pci_dev *pdev = bp->pdev;
 		u32 val;
 		u16 pmc;
 
@@ -8668,9 +8669,9 @@
 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 
 		/* Enable the PME and clear the status */
-		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
 		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
 
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 
@@ -10399,7 +10400,7 @@
 		break;
 	}
 
-	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
 	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
 
 	BNX2X_DEV_INFO("%sWoL capable\n",
@@ -12141,8 +12142,7 @@
 	}
 
 	if (IS_PF(bp)) {
-		bp->pm_cap = pdev->pm_cap;
-		if (bp->pm_cap == 0) {
+		if (!pdev->pm_cap) {
 			dev_err(&bp->pdev->dev,
 				"Cannot find power management capability, aborting\n");
 			rc = -EIO;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5701f3d..12d961c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3034,6 +3034,7 @@
 {
 	switch (tg3_asic_rev(tp)) {
 	case ASIC_REV_5719:
+	case ASIC_REV_5720:
 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 		    !tp->pci_fn)
 			return true;
@@ -16192,12 +16193,12 @@
 			 * So explicitly force the chip into D0 here.
 			 */
 			pci_read_config_dword(tp->pdev,
-					      tp->pm_cap + PCI_PM_CTRL,
+					      tp->pdev->pm_cap + PCI_PM_CTRL,
 					      &pm_reg);
 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
 			pci_write_config_dword(tp->pdev,
-					       tp->pm_cap + PCI_PM_CTRL,
+					       tp->pdev->pm_cap + PCI_PM_CTRL,
 					       pm_reg);
 
 			/* Also, force SERR#/PERR# in PCI command. */
@@ -17346,7 +17347,6 @@
 	tp = netdev_priv(dev);
 	tp->pdev = pdev;
 	tp->dev = dev;
-	tp->pm_cap = pdev->pm_cap;
 	tp->rx_mode = TG3_DEF_RX_MODE;
 	tp->tx_mode = TG3_DEF_TX_MODE;
 	tp->irq_sync = 1;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ddb8be1..7025780 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3234,7 +3234,6 @@
 	u8				pci_lat_timer;
 
 	int				pci_fn;
-	int				pm_cap;
 	int				msi_cap;
 	int				pcix_cap;
 	int				pcie_readrq;
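
The bnx2x and tg3 hunks above drop the drivers' private copies of the PCI
power-management capability offset; the PCI core discovers it during
enumeration and stores it in pdev->pm_cap, so callers read it from there. A
sketch of the resulting access pattern (helper name hypothetical):

static u16 example_read_pmcsr(struct pci_dev *pdev)
{
	u16 pmcsr = 0;

	/* pdev->pm_cap is 0 when the capability is absent */
	if (pdev->pm_cap)
		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	return pmcsr;
}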
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d0665c..c73cabd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6149,8 +6149,10 @@
 		pr_warn("could not create debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4_driver);
-	if (ret < 0)
+	if (ret < 0) {
 		debugfs_remove(cxgb4_debugfs_root);
+		destroy_workqueue(workq);
+	}
 
 	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 
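The cxgb4 fix above destroys the workqueue when pci_register_driver() fails,
following the usual rule that a module init error path unwinds earlier setup
in reverse order. A condensed sketch of that shape, with illustrative names:

static struct workqueue_struct *example_wq;
static struct dentry *example_dbg_root;
static struct pci_driver example_driver;

static int __init example_init_module(void)
{
	int ret;

	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	ret = pci_register_driver(&example_driver);
	if (ret < 0) {
		debugfs_remove(example_dbg_root);	/* NULL-safe */
		destroy_workqueue(example_wq);		/* the missing cleanup */
	}

	return ret;
}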
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 2db6c57..263b92c 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1321,7 +1321,7 @@
     if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
 		                                     lp->adapter_name, dev)) {
 	printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
-	if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
 			                             lp->adapter_name, dev)) {
 	    printk("\n              Cannot get IRQ- reconfigure your hardware.\n");
 	    disable_ast(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3224d28..100b528 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2802,7 +2802,7 @@
 	struct be_resources res = {0};
 	struct be_vf_cfg *vf_cfg;
 	u32 cap_flags, en_flags, vf;
-	int status;
+	int status = 0;
 
 	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 		    BE_IF_FLAGS_MULTICAST;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f9aacf5..b2793b9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2199,7 +2199,7 @@
 			goto failed_irq;
 		}
 		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
-				       IRQF_DISABLED, pdev->name, ndev);
+				       0, pdev->name, ndev);
 		if (ret)
 			goto failed_irq;
 	}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index e3c7c69..91227d0 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1097,7 +1097,7 @@
 	/* New: if bus is PCI or EISA, interrupts might be shared interrupts */
 	if (request_irq(dev->irq, hp100_interrupt,
 			lp->bus == HP100_BUS_PCI || lp->bus ==
-			HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED,
+			HP100_BUS_EISA ? IRQF_SHARED : 0,
 			"hp100", dev)) {
 		printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
 		return -EAGAIN;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 35853b4..2d1c6bd 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -102,6 +102,19 @@
 
 static int ehea_remove(struct platform_device *dev);
 
+static struct of_device_id ehea_module_device_table[] = {
+	{
+		.name = "lhea",
+		.compatible = "IBM,lhea",
+	},
+	{
+		.type = "network",
+		.compatible = "IBM,lhea-ethernet",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
+
 static struct of_device_id ehea_device_table[] = {
 	{
 		.name = "lhea",
@@ -109,7 +122,6 @@
 	},
 	{},
 };
-MODULE_DEVICE_TABLE(of, ehea_device_table);
 
 static struct platform_driver ehea_driver = {
 	.driver = {
@@ -1285,7 +1297,7 @@
 
 	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
 				  ehea_qp_aff_irq_handler,
-				  IRQF_DISABLED, port->int_aff_name, port);
+				  0, port->int_aff_name, port);
 	if (ret) {
 		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
 			   port->qp_eq->attr.ist1);
@@ -1303,8 +1315,7 @@
 			 "%s-queue%d", dev->name, i);
 		ret = ibmebus_request_irq(pr->eq->attr.ist1,
 					  ehea_recv_irq_handler,
-					  IRQF_DISABLED, pr->int_send_name,
-					  pr);
+					  0, pr->int_send_name, pr);
 		if (ret) {
 			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
 				   i, pr->eq->attr.ist1);
@@ -3320,7 +3331,7 @@
 	}
 
 	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
-				  ehea_interrupt_neq, IRQF_DISABLED,
+				  ehea_interrupt_neq, 0,
 				  "ehea_neq", adapter);
 	if (ret) {
 		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f0e7ed2..149ac85 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,4 +241,22 @@
 	  will be called ixgbevf.  MSI-X interrupt support is required
 	  for this driver to work correctly.
 
+config I40E
+	tristate "Intel(R) Ethernet Controller XL710 Family support"
+	depends on PCI
+	---help---
+	  This driver supports Intel(R) Ethernet Controller XL710 Family of
+	  devices.  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called i40e.
+
 endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index c8210e6..5bae933 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -9,4 +9,5 @@
 obj-$(CONFIG_IGBVF) += igbvf/
 obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_I40E) += i40e/
 obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8633b8..d14c8f5 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -922,6 +922,14 @@
 			else
 				mask &= ~(1 << 30);
 		}
+		if (mac->type == e1000_pch2lan) {
+			/* SHRAH[0,1,2] different than previous */
+			if (i == 7)
+				mask &= 0xFFF4FFFF;
+			/* SHRAH[3] different than SHRAH[0,1,2] */
+			if (i == 10)
+				mask |= (1 << 30);
+		}
 
 		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
 				       0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index af08188..42f0f67 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1371,7 +1371,10 @@
 		return;
 	}
 
-	if (index < hw->mac.rar_entry_count) {
+	/* RAR[1-6] are owned by manageability.  Skip those and program the
+	 * next address into the SHRA register array.
+	 */
+	if (index < (u32)(hw->mac.rar_entry_count - 6)) {
 		s32 ret_val;
 
 		ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1962,8 +1965,8 @@
 	if (ret_val)
 		goto release;
 
-	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
-	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
 		mac_reg = er32(RAL(i));
 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
 					   (u16)(mac_reg & 0xFFFF));
@@ -2007,10 +2010,10 @@
 		return ret_val;
 
 	if (enable) {
-		/* Write Rx addresses (rar_entry_count for RAL/H, +4 for
+		/* Write Rx addresses (rar_entry_count for RAL/H, and
 		 * SHRAL/H) and initial CRC values to the MAC
 		 */
-		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+		for (i = 0; i < hw->mac.rar_entry_count; i++) {
 			u8 mac_addr[ETH_ALEN] = { 0 };
 			u32 addr_high, addr_low;
 
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 5986569..217090d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -98,7 +98,7 @@
 #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES	7
-#define E1000_PCH2_RAR_ENTRIES	5	/* RAR[0], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES	11      /* RAR[0-6], SHRA[0-3] */
 #define E1000_PCH_LPT_RAR_ENTRIES	12	/* RAR[0], SHRA[0-10] */
 
 #define PHY_PAGE_SHIFT		5
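
With the e1000e changes above, rar_entry_count on PCH2 (82579) counts RAR[0],
the manageability-owned RAR[1-6] and SHRA[0-3] (11 in total), so the receive
address code derives the driver-usable range by subtracting the six reserved
entries. A sketch of the implied index mapping (helper name hypothetical):

static bool example_index_uses_shra(struct e1000_hw *hw, u32 index)
{
	/* index 0 is RAR[0]; indices 1..(rar_entry_count - 7) are
	 * remapped onto SHRA[0..3], skipping manageability's RAR[1-6]
	 */
	return index && index < (u32)(hw->mac.rar_entry_count - 6);
}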
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e87e9b0..4ef7867 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4868,7 +4868,7 @@
 			 */
 			if ((hw->phy.type == e1000_phy_igp_3 ||
 			     hw->phy.type == e1000_phy_bm) &&
-			    (hw->mac.autoneg == true) &&
+			    hw->mac.autoneg &&
 			    (adapter->link_speed == SPEED_10 ||
 			     adapter->link_speed == SPEED_100) &&
 			    (adapter->link_duplex == HALF_DUPLEX)) {
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 0000000..479b2c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,44 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+#
+
+obj-$(CONFIG_I40E) += i40e.o
+
+i40e-objs := i40e_main.o \
+	i40e_ethtool.o	\
+	i40e_adminq.o	\
+	i40e_common.o	\
+	i40e_hmc.o	\
+	i40e_lan_hmc.o	\
+	i40e_nvm.o	\
+	i40e_debugfs.o	\
+	i40e_diag.o	\
+	i40e_txrx.o	\
+	i40e_virtchnl_pf.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
new file mode 100644
index 0000000..b5252eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -0,0 +1,558 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_H_
+#define _I40E_H_
+
+#include <net/tcp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/version.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+#include "i40e_virtchnl_pf.h"
+#include "i40e_txrx.h"
+
+/* Useful i40e defaults */
+#define I40E_BASE_PF_SEID     16
+#define I40E_BASE_VSI_SEID    512
+#define I40E_BASE_VEB_SEID    288
+#define I40E_MAX_VEB          16
+
+#define I40E_MAX_NUM_DESCRIPTORS      4096
+#define I40E_MAX_REGISTER     0x0038FFFF
+#define I40E_DEFAULT_NUM_DESCRIPTORS  512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE  32
+#define I40E_MIN_NUM_DESCRIPTORS      64
+#define I40E_MIN_MSIX                 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
+#define I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */
+#define I40E_DEFAULT_QUEUES_PER_VF    4
+#define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
+#define I40E_FDIR_RING                0
+#define I40E_FDIR_RING_COUNT          32
+#define I40E_MAX_AQ_BUF_SIZE          4096
+#define I40E_AQ_LEN                   32
+#define I40E_AQ_WORK_LIMIT            16
+#define I40E_MAX_USER_PRIORITY        8
+#define I40E_DEFAULT_MSG_ENABLE       4
+
+#define I40E_NVM_VERSION_LO_SHIFT  0
+#define I40E_NVM_VERSION_LO_MASK   (0xf << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_MID_SHIFT 4
+#define I40E_NVM_VERSION_MID_MASK  (0xff << I40E_NVM_VERSION_MID_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT  12
+#define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
+
+/* magic for getting defines into strings */
+#define STRINGIFY(foo)  #foo
+#define XSTRINGIFY(bar) STRINGIFY(bar)
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#define I40E_RX_DESC(R, i)			\
+	((ring_is_16byte_desc_enabled(R))	\
+		? (union i40e_32byte_rx_desc *)	\
+			(&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
+		: (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+#define I40E_TX_DESC(R, i)			\
+	(&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i)			\
+	(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40E_TX_FDIRDESC(R, i)			\
+	(&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
+
+/* default to trying for four seconds */
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* driver state flags */
+enum i40e_state_t {
+	__I40E_TESTING,
+	__I40E_CONFIG_BUSY,
+	__I40E_CONFIG_DONE,
+	__I40E_DOWN,
+	__I40E_NEEDS_RESTART,
+	__I40E_SERVICE_SCHED,
+	__I40E_ADMINQ_EVENT_PENDING,
+	__I40E_MDD_EVENT_PENDING,
+	__I40E_VFLR_EVENT_PENDING,
+	__I40E_RESET_RECOVERY_PENDING,
+	__I40E_RESET_INTR_RECEIVED,
+	__I40E_REINIT_REQUESTED,
+	__I40E_PF_RESET_REQUESTED,
+	__I40E_CORE_RESET_REQUESTED,
+	__I40E_GLOBAL_RESET_REQUESTED,
+	__I40E_FILTER_OVERFLOW_PROMISC,
+};
+
+enum i40e_interrupt_policy {
+	I40E_INTERRUPT_BEST_CASE,
+	I40E_INTERRUPT_MEDIUM,
+	I40E_INTERRUPT_LOWEST
+};
+
+struct i40e_lump_tracking {
+	u16 num_entries;
+	u16 search_hint;
+	u16 list[0];
+#define I40E_PILE_VALID_BIT  0x8000
+};
+
+#define I40E_DEFAULT_ATR_SAMPLE_RATE	20
+#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
+struct i40e_fdir_data {
+	u16 q_index;
+	u8  flex_off;
+	u8  pctype;
+	u16 dest_vsi;
+	u8  dest_ctl;
+	u8  fd_status;
+	u16 cnt_index;
+	u32 fd_id;
+	u8  *raw_packet;
+};
+
+#define I40E_DCB_PRIO_TYPE_STRICT	0
+#define I40E_DCB_PRIO_TYPE_ETS		1
+#define I40E_DCB_STRICT_PRIO_CREDITS	127
+#define I40E_MAX_USER_PRIORITY	8
+/* DCB per TC information data structure */
+struct i40e_tc_info {
+	u16	qoffset;	/* Queue offset from base queue */
+	u16	qcount;		/* Total Queues */
+	u8	netdev_tc;	/* Netdev TC index if netdev associated */
+};
+
+/* TC configuration data structure */
+struct i40e_tc_configuration {
+	u8	numtc;		/* Total number of enabled TCs */
+	u8	enabled_tc;	/* TC map */
+	struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* struct that defines the Ethernet device */
+struct i40e_pf {
+	struct pci_dev *pdev;
+	struct i40e_hw hw;
+	unsigned long state;
+	unsigned long link_check_timeout;
+	struct msix_entry *msix_entries;
+	u16 num_msix_entries;
+	bool fc_autoneg_status;
+
+	u16 eeprom_version;
+	u16 num_vmdq_vsis;         /* num vmdq pools this pf has set up */
+	u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */
+	u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
+	u16 num_req_vfs;           /* num VFs requested for this PF */
+	u16 num_vf_qps;            /* num queue pairs per vf */
+	u16 num_tc_qps;            /* num queue pairs per TC */
+	u16 num_lan_qps;           /* num lan queues this pf has set up */
+	u16 num_lan_msix;          /* num queue vectors for the base pf vsi */
+	u16 rss_size;              /* num queues in the RSS array */
+	u16 rss_size_max;          /* HW defined max RSS queues */
+	u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
+	u8 atr_sample_rate;
+
+	enum i40e_interrupt_policy int_policy;
+	u16 rx_itr_default;
+	u16 tx_itr_default;
+	u16 msg_enable;
+	char misc_int_name[IFNAMSIZ + 9];
+	u16 adminq_work_limit; /* num of admin receive queue desc to process */
+	int service_timer_period;
+	struct timer_list service_timer;
+	struct work_struct service_task;
+
+	u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
+#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
+#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
+#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
+#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
+#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
+#define I40E_FLAG_MQ_ENABLED                   (u64)(1 << 7)
+#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 8)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 9)
+#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 10)
+#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 13)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 14)
+#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 15)
+#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 16)
+#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 18)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 19)
+#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 20)
+#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 21)
+#define I40E_FLAG_FDIR_ENABLED                 (u64)(1 << 22)
+#define I40E_FLAG_FDIR_ATR_ENABLED             (u64)(1 << 23)
+#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 27)
+
+	u16 num_tx_queues;
+	u16 num_rx_queues;
+
+	bool stat_offsets_loaded;
+	struct i40e_hw_port_stats stats;
+	struct i40e_hw_port_stats stats_offsets;
+	u32 tx_timeout_count;
+	u32 tx_timeout_recovery_level;
+	unsigned long tx_timeout_last_recovery;
+	u32 hw_csum_rx_error;
+	u32 led_status;
+	u16 corer_count; /* Core reset count */
+	u16 globr_count; /* Global reset count */
+	u16 empr_count; /* EMP reset count */
+	u16 pfr_count; /* PF reset count */
+
+	struct mutex switch_mutex;
+	u16 lan_vsi;       /* our default LAN VSI */
+	u16 lan_veb;       /* initial relay, if exists */
+#define I40E_NO_VEB   0xffff
+#define I40E_NO_VSI   0xffff
+	u16 next_vsi;      /* Next unallocated VSI - 0-based! */
+	struct i40e_vsi **vsi;
+	struct i40e_veb *veb[I40E_MAX_VEB];
+
+	struct i40e_lump_tracking *qp_pile;
+	struct i40e_lump_tracking *irq_pile;
+
+	/* switch config info */
+	u16 pf_seid;
+	u16 main_vsi_seid;
+	u16 mac_seid;
+	struct i40e_aqc_get_switch_config_data *sw_config;
+	struct kobject *switch_kobj;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *i40e_dbg_pf;
+#endif /* CONFIG_DEBUG_FS */
+
+	/* sr-iov config info */
+	struct i40e_vf *vf;
+	int num_alloc_vfs;	/* actual number of VFs allocated */
+	u32 vf_aq_requests;
+
+	/* DCBx/DCBNL capability for PF that indicates
+	 * whether DCBx is managed by firmware or host
+	 * based agent (LLDPAD). Also, indicates what
+	 * flavor of DCBx protocol (IEEE/CEE) is supported
+	 * by the device. For now we're supporting IEEE
+	 * mode only.
+	 */
+	u16 dcbx_cap;
+
+	u32	fcoe_hmc_filt_num;
+	u32	fcoe_hmc_cntx_num;
+	struct i40e_filter_control_settings filter_settings;
+};
+
+struct i40e_mac_filter {
+	struct list_head list;
+	u8 macaddr[ETH_ALEN];
+#define I40E_VLAN_ANY -1
+	s16 vlan;
+	u8 counter;		/* number of instances of this filter */
+	bool is_vf;		/* filter belongs to a VF */
+	bool is_netdev;		/* filter belongs to a netdev */
+	bool changed;		/* filter needs to be sync'd to the HW */
+};
+
+struct i40e_veb {
+	struct i40e_pf *pf;
+	u16 idx;
+	u16 veb_idx;           /* index of VEB parent */
+	u16 seid;
+	u16 uplink_seid;
+	u16 stats_idx;           /* index of this VEB's statistics block */
+	u8  enabled_tc;
+	u16 flags;
+	u16 bw_limit;
+	u8  bw_max_quanta;
+	bool is_abs_credits;
+	u8  bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
+	u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+	u8  bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+	struct kobject *kobj;
+	bool stat_offsets_loaded;
+	struct i40e_eth_stats stats;
+	struct i40e_eth_stats stats_offsets;
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct i40e_vsi {
+	struct net_device *netdev;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	bool netdev_registered;
+	bool stat_offsets_loaded;
+
+	u32 current_netdev_flags;
+	unsigned long state;
+#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
+#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+	unsigned long flags;
+
+	struct list_head mac_filter_list;
+
+	/* VSI stats */
+	struct rtnl_link_stats64 net_stats;
+	struct rtnl_link_stats64 net_stats_offsets;
+	struct i40e_eth_stats eth_stats;
+	struct i40e_eth_stats eth_stats_offsets;
+	u32 tx_restart;
+	u32 tx_busy;
+	u32 rx_buf_failed;
+	u32 rx_page_failed;
+
+	/* These are arrays of rings, allocated at run-time */
+	struct i40e_ring *rx_rings;
+	struct i40e_ring *tx_rings;
+
+	u16 work_limit;
+	/* high bit set means dynamic, use accessor routines to read/write.
+	 * hardware only supports 2us resolution for the ITR registers.
+	 * these values always store the USER setting, and must be converted
+	 * before programming to a register.
+	 */
+	u16 rx_itr_setting;
+	u16 tx_itr_setting;
+
+	u16 max_frame;
+	u16 rx_hdr_len;
+	u16 rx_buf_len;
+	u8  dtype;
+
+	/* List of q_vectors allocated to this VSI */
+	struct i40e_q_vector *q_vectors;
+	int num_q_vectors;
+	int base_vector;
+
+	u16 seid;            /* HW index of this VSI (absolute index) */
+	u16 id;              /* VSI number */
+	u16 uplink_seid;
+
+	u16 base_queue;      /* vsi's first queue in hw array */
+	u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+	u16 num_queue_pairs; /* Used tx and rx pairs */
+	u16 num_desc;
+	enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
+	u16 vf_id;		/* Virtual function ID for SRIOV VSIs */
+
+	struct i40e_tc_configuration tc_config;
+	struct i40e_aqc_vsi_properties_data info;
+
+	/* VSI BW limit (absolute across all TCs) */
+	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
+	u8  bw_max_quanta;	/* Max Quanta when BW limit is enabled */
+
+	/* Relative TC credits across VSIs */
+	u8  bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+	/* TC BW limit credits within VSI */
+	u16  bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+	/* TC BW limit max quanta within VSI */
+	u8  bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+
+	struct i40e_pf *back;  /* Backreference to associated PF */
+	u16 idx;               /* index in pf->vsi[] */
+	u16 veb_idx;           /* index of VEB parent */
+	struct kobject *kobj;  /* sysfs object */
+
+	/* VSI specific handlers */
+	irqreturn_t (*irq_handler)(int irq, void *data);
+} ____cacheline_internodealigned_in_smp;
+
+struct i40e_netdev_priv {
+	struct i40e_vsi *vsi;
+};
+
+/* struct that defines an interrupt vector */
+struct i40e_q_vector {
+	struct i40e_vsi *vsi;
+
+	u16 v_idx;		/* index in the vsi->q_vector array. */
+	u16 reg_idx;		/* register index of the interrupt */
+
+	struct napi_struct napi;
+
+	struct i40e_ring_container rx;
+	struct i40e_ring_container tx;
+
+	u8 num_ringpairs;	/* total number of ring pairs in vector */
+
+	char name[IFNAMSIZ + 9];
+	cpumask_t affinity_mask;
+} ____cacheline_internodealigned_in_smp;
+
+/* lan device */
+struct i40e_device {
+	struct list_head list;
+	struct i40e_pf *pf;
+};
+
+/**
+ * i40e_fw_version_str - format the FW and NVM version strings
+ * @hw: ptr to the hardware info
+ **/
+static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+{
+	static char buf[32];
+
+	snprintf(buf, sizeof(buf),
+		 "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
+		 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+						>> I40E_NVM_VERSION_HI_SHIFT,
+		 (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
+						>> I40E_NVM_VERSION_MID_SHIFT,
+		 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
+						>> I40E_NVM_VERSION_LO_SHIFT,
+		 hw->nvm.eetrack);
+
+	return buf;
+}
+
+/**
+ * i40e_netdev_to_pf: Retrieve the PF struct for given netdev
+ * @netdev: the corresponding netdev
+ *
+ * Return the PF struct for the given netdev
+ **/
+static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	return vsi->back;
+}
+
+static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
+				irqreturn_t (*irq_handler)(int, void *))
+{
+	vsi->irq_handler = irq_handler;
+}
+
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: the first quad word of the program status descriptor
+ *
+ * The value in the descriptor length field indicates whether this
+ * is a programming status descriptor for flow director or FCoE:
+ * if it equals I40E_RX_PROG_STATUS_DESC_LENGTH, it is a programming
+ * status descriptor; otherwise it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+	return I40E_RX_PROG_STATUS_DESC_LENGTH ==
+		(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+}
+
+/* needed by i40e_ethtool.c */
+int i40e_up(struct i40e_vsi *vsi);
+void i40e_down(struct i40e_vsi *vsi);
+extern const char i40e_driver_name[];
+extern const char i40e_driver_version_str[];
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_eth_stats(struct i40e_vsi *vsi);
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+int i40e_fetch_switch_configuration(struct i40e_pf *pf,
+				    bool printconfig);
+
+/* needed by i40e_main.c */
+void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
+			  struct i40e_ring *tx_ring);
+void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
+			    struct i40e_ring *tx_ring);
+void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
+			     struct i40e_ring *tx_ring);
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+			     struct i40e_pf *pf, bool add);
+
+void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+					u8 *macaddr, s16 vlan,
+					bool is_vf, bool is_netdev);
+void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
+		     bool is_vf, bool is_netdev);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+				u16 uplink, u32 param1);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
+				 struct i40e_vsi *start_vsi);
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+				u16 downlink_seid, u8 enabled_tc);
+void i40e_veb_release(struct i40e_veb *veb);
+
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
+void i40e_pf_reset_stats(struct i40e_pf *pf);
+#ifdef CONFIG_DEBUG_FS
+void i40e_dbg_pf_init(struct i40e_pf *pf);
+void i40e_dbg_pf_exit(struct i40e_pf *pf);
+void i40e_dbg_init(void);
+void i40e_dbg_exit(void);
+#else
+static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
+static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
+static inline void i40e_dbg_init(void) {}
+static inline void i40e_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS */
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+					     bool is_vf, bool is_netdev);
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+				      bool is_vf, bool is_netdev);
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+
+#endif /* _I40E_H_ */
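
As a usage sketch of the accessors this header declares (not code from the
driver), a netdev is walked back to its PF through the i40e_netdev_priv and
VSI back-pointers, after which the pf->flags bits can be tested:

static bool example_pf_has_msix(struct net_device *netdev)
{
	struct i40e_pf *pf = i40e_netdev_to_pf(netdev);

	return !!(pf->flags & I40E_FLAG_MSIX_ENABLED);
}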
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
new file mode 100644
index 0000000..0c524fa
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -0,0 +1,983 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ *  i40e_adminq_init_regs - Initialize AdminQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+	/* set head and tail registers in our local struct */
+	if (hw->mac.type == I40E_MAC_VF) {
+		hw->aq.asq.tail = I40E_VF_ATQT1;
+		hw->aq.asq.head = I40E_VF_ATQH1;
+		hw->aq.arq.tail = I40E_VF_ARQT1;
+		hw->aq.arq.head = I40E_VF_ARQH1;
+	} else {
+		hw->aq.asq.tail = I40E_PF_ATQT;
+		hw->aq.asq.head = I40E_PF_ATQH;
+		hw->aq.arq.tail = I40E_PF_ARQT;
+		hw->aq.arq.head = I40E_PF_ARQH;
+	}
+}
+
+/**
+ *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_virt_mem mem;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+					 i40e_mem_atq_ring,
+					 (hw->aq.num_asq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+	if (ret_code)
+		return ret_code;
+
+	hw->aq.asq.desc = hw->aq.asq_mem.va;
+	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
+
+	ret_code = i40e_allocate_virt_mem(hw, &mem,
+					  (hw->aq.num_asq_entries *
+					  sizeof(struct i40e_asq_cmd_details)));
+	if (ret_code) {
+		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+		hw->aq.asq_mem.va = NULL;
+		hw->aq.asq_mem.pa = 0;
+		return ret_code;
+	}
+
+	hw->aq.asq.details = mem.va;
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+					 i40e_mem_arq_ring,
+					 (hw->aq.num_arq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+	if (ret_code)
+		return ret_code;
+
+	hw->aq.arq.desc = hw->aq.arq_mem.va;
+	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_adminq_asq - Free Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted send buffers have already been cleaned
+ *  and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+	struct i40e_virt_mem mem;
+
+	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+	hw->aq.asq_mem.va = NULL;
+	hw->aq.asq_mem.pa = 0;
+	mem.va = hw->aq.asq.details;
+	i40e_free_virt_mem(hw, &mem);
+	hw->aq.asq.details = NULL;
+}
+
+/**
+ *  i40e_free_adminq_arq - Free Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted receive buffers have already been cleaned
+ *  and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
+	hw->aq.arq_mem.va = NULL;
+	hw->aq.arq_mem.pa = 0;
+}
+
+/**
+ *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ *  @hw:     pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_aq_desc *desc;
+	struct i40e_virt_mem mem;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* We'll be allocating the buffer info memory first, then we can
+	 * allocate the mapped buffers for the event processing
+	 */
+
+	/* buffer_info structures do not need alignment */
+	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
+					  sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_arq_bufs;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_arq_entries; i++) {
+		bi = &hw->aq.arq.r.arq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_arq_buf,
+						 hw->aq.arq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_arq_bufs;
+
+		/* now configure the descriptors for use */
+		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+		desc->opcode = 0;
+		/* This is in accordance with the Admin queue design; there
+		 * is no register for buffer size configuration
+		 */
+		desc->datalen = cpu_to_le16((u16)bi->size);
+		desc->retval = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.external.addr_high =
+			cpu_to_le32(upper_32_bits(bi->pa));
+		desc->params.external.addr_low =
+			cpu_to_le32(lower_32_bits(bi->pa));
+		desc->params.external.param0 = 0;
+		desc->params.external.param1 = 0;
+	}
+
+alloc_arq_bufs:
+	return ret_code;
+
+unwind_alloc_arq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+	mem.va = hw->aq.arq.r.arq_bi;
+	i40e_free_virt_mem(hw, &mem);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ *  @hw:     pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_virt_mem mem;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* No mapped memory needed yet, just the buffer info structures */
+	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
+					  sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_asq_bufs;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_asq_entries; i++) {
+		bi = &hw->aq.asq.r.asq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_asq_buf,
+						 hw->aq.asq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_asq_bufs;
+	}
+alloc_asq_bufs:
+	return ret_code;
+
+unwind_alloc_asq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+	mem.va = hw->aq.asq.r.asq_bi;
+	i40e_free_virt_mem(hw, &mem);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_arq_bufs - Free receive queue buffer info elements
+ *  @hw:     pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+	struct i40e_virt_mem mem;
+	int i;
+
+	for (i = 0; i < hw->aq.num_arq_entries; i++)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+	mem.va = hw->aq.arq.r.arq_bi;
+	i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ *  i40e_free_asq_bufs - Free send queue buffer info elements
+ *  @hw:     pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+	struct i40e_virt_mem mem;
+	int i;
+
+	/* only unmap if the address is non-NULL */
+	for (i = 0; i < hw->aq.num_asq_entries; i++)
+		if (hw->aq.asq.r.asq_bi[i].pa)
+			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+	/* now free the buffer info list */
+	mem.va = hw->aq.asq.r.asq_bi;
+	i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ *  i40e_config_asq_regs - configure ASQ registers
+ *  @hw:     pointer to the hardware structure
+ *
+ *  Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+	if (hw->mac.type == I40E_MAC_VF) {
+		/* configure the transmit queue */
+		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+	} else {
+		/* configure the transmit queue */
+		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+					  I40E_PF_ATQLEN_ATQENABLE_MASK));
+	}
+}
+
+/**
+ *  i40e_config_arq_regs - ARQ register configuration
+ *  @hw:     pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+	if (hw->mac.type == I40E_MAC_VF) {
+		/* configure the receive queue */
+		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+	} else {
+		/* configure the receive queue */
+		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+					  I40E_PF_ARQLEN_ARQENABLE_MASK));
+	}
+
+	/* Update tail in the HW to post pre-allocated buffers */
+	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ *  i40e_init_asq - main initialization routine for ASQ
+ *  @hw:     pointer to the hardware structure
+ *
+ *  This is the main initialization routine for the Admin Send Queue
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.asq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.asq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+	hw->aq.asq.count = hw->aq.num_asq_entries;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_asq_ring(hw);
+	if (ret_code)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_asq_bufs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	i40e_config_asq_regs(hw);
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_init_arq - initialize ARQ
+ *  @hw:     pointer to the hardware structure
+ *
+ *  The main initialization routine for the Admin Receive (Event) Queue.
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.arq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+	hw->aq.arq.count = hw->aq.num_arq_entries;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_arq_ring(hw);
+	if (ret_code)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_arq_bufs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	i40e_config_arq_regs(hw);
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_asq - shutdown the ASQ
+ *  @hw:     pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.asq.count == 0)
+		return I40E_ERR_NOT_READY;
+
+	/* Stop firmware AdminQ processing */
+	if (hw->mac.type == I40E_MAC_VF)
+		wr32(hw, I40E_VF_ATQLEN1, 0);
+	else
+		wr32(hw, I40E_PF_ATQLEN, 0);
+
+	/* make sure lock is available */
+	mutex_lock(&hw->aq.asq_mutex);
+
+	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_asq_bufs(hw);
+	/* free the ring descriptors */
+	i40e_free_adminq_asq(hw);
+
+	mutex_unlock(&hw->aq.asq_mutex);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_arq - shutdown ARQ
+ *  @hw:     pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.arq.count == 0)
+		return I40E_ERR_NOT_READY;
+
+	/* Stop firmware AdminQ processing */
+	if (hw->mac.type == I40E_MAC_VF)
+		wr32(hw, I40E_VF_ARQLEN1, 0);
+	else
+		wr32(hw, I40E_PF_ARQLEN, 0);
+
+	/* make sure lock is available */
+	mutex_lock(&hw->aq.arq_mutex);
+
+	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_arq_bufs(hw);
+	/* free the ring descriptors */
+	i40e_free_adminq_arq(hw);
+
+	mutex_unlock(&hw->aq.arq_mutex);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_init_adminq - main initialization routine for Admin Queue
+ *  @hw:     pointer to the hardware structure
+ *
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *     - hw->aq.asq_buf_size
+ **/
+i40e_status i40e_init_adminq(struct i40e_hw *hw)
+{
+	u16 eetrack_lo, eetrack_hi;
+	i40e_status ret_code;
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	/* initialize locks */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
+	/* Set up register offsets */
+	i40e_adminq_init_regs(hw);
+
+	/* allocate the ASQ */
+	ret_code = i40e_init_asq(hw);
+	if (ret_code)
+		goto init_adminq_destroy_locks;
+
+	/* allocate the ARQ */
+	ret_code = i40e_init_arq(hw);
+	if (ret_code)
+		goto init_adminq_free_asq;
+
+	ret_code = i40e_aq_get_firmware_version(hw,
+				     &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
+				     &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
+				     NULL);
+	if (ret_code)
+		goto init_adminq_free_arq;
+
+	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
+		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+		goto init_adminq_free_arq;
+	}
+	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
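+	/* best-effort: the result of the HMC profile request is not checked */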
+	ret_code = i40e_aq_set_hmc_resource_profile(hw,
+						    I40E_HMC_PROFILE_DEFAULT,
+						    0,
+						    NULL);
+	ret_code = 0;
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_arq:
+	i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+	i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+	return ret_code;
+}
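+
+/* Usage sketch (comment only, not driver code): a caller is expected to
+ * size both queues and their buffers before calling i40e_init_adminq(),
+ * for example:
+ *
+ *	hw->aq.num_asq_entries = I40E_AQ_LEN;
+ *	hw->aq.num_arq_entries = I40E_AQ_LEN;
+ *	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ *	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ *	if (i40e_init_adminq(hw))
+ *		goto err;
+ */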
+
+/**
+ *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ *  @hw:     pointer to the hardware structure
+ **/
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	i40e_shutdown_asq(hw);
+	i40e_shutdown_arq(hw);
+
+	/* destroy the locks */
+
+	return ret_code;
+}
+
+/**
+ *  i40e_clean_asq - cleans Admin send queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+	struct i40e_adminq_ring *asq = &(hw->aq.asq);
+	struct i40e_asq_cmd_details *details;
+	u16 ntc = asq->next_to_clean;
+	struct i40e_aq_desc desc_cb;
+	struct i40e_aq_desc *desc;
+
+	desc = I40E_ADMINQ_DESC(*asq, ntc);
+	details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	while (rd32(hw, hw->aq.asq.head) != ntc) {
+		if (details->callback) {
+			I40E_ADMINQ_CALLBACK cb_func =
+					(I40E_ADMINQ_CALLBACK)details->callback;
+			desc_cb = *desc;
+			cb_func(hw, &desc_cb);
+		}
+		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+		memset((void *)details, 0,
+		       sizeof(struct i40e_asq_cmd_details));
+		ntc++;
+		if (ntc == asq->count)
+			ntc = 0;
+		desc = I40E_ADMINQ_DESC(*asq, ntc);
+		details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	}
+
+	asq->next_to_clean = ntc;
+
+	return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ *  i40e_asq_done - check if FW has processed the Admin Send Queue
+ *  @hw: pointer to the hw struct
+ *
+ *  Returns true if the firmware has processed all descriptors on the
+ *  admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+	/* AQ designers suggest use of head for better
+	 * timing reliability than DD bit
+	 */
+	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+}
+
+/**
+ *  i40e_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  This is the main send command driver routine for the Admin Queue send
+ *  queue.  It runs the queue, cleans the queue, etc.
+ **/
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	i40e_status status = 0;
+	struct i40e_dma_mem *dma_buff = NULL;
+	struct i40e_asq_cmd_details *details;
+	struct i40e_aq_desc *desc_on_ring;
+	bool cmd_completed = false;
+	u16  retval = 0;
+
+	if (hw->aq.asq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Admin queue not initialized.\n");
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_exit;
+	}
+
+	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+	if (cmd_details) {
+		memcpy(details, cmd_details,
+		       sizeof(struct i40e_asq_cmd_details));
+
+		/* If the cmd_details are defined copy the cookie.  The
+		 * cookie is opaque to the FW, which only echoes it back to
+		 * the driver, so its byte order does not matter; the
+		 * cpu_to_le32 below is applied purely for consistency
+		 */
+		if (details->cookie) {
+			desc->cookie_high =
+				cpu_to_le32(upper_32_bits(details->cookie));
+			desc->cookie_low =
+				cpu_to_le32(lower_32_bits(details->cookie));
+		}
+	} else {
+		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+	}
+
+	/* clear requested flags and then set additional flags if defined */
+	desc->flags &= ~cpu_to_le16(details->flags_dis);
+	desc->flags |= cpu_to_le16(details->flags_ena);
+
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (buff_size > hw->aq.asq_buf_size) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Invalid buffer size: %d.\n",
+			   buff_size);
+		status = I40E_ERR_INVALID_SIZE;
+		goto asq_send_command_error;
+	}
+
+	if (details->postpone && !details->async) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Async flag not set along with postpone flag");
+		status = I40E_ERR_PARAM;
+		goto asq_send_command_error;
+	}
+
+	/* call clean and check queue available function to reclaim the
+	 * descriptors that were processed by FW, the function returns the
+	 * number of desc available.  The clean function called here could
+	 * be called in a separate thread in case of asynchronous completions.
+	 */
+	if (i40e_clean_asq(hw) == 0) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Error queue is full.\n");
+		status = I40E_ERR_ADMIN_QUEUE_FULL;
+		goto asq_send_command_error;
+	}
+
+	/* initialize the temp desc pointer with the right desc */
+	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+	/* if the desc is available copy the temp desc to the right place */
+	memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
+
+	/* if buff is not NULL assume indirect command */
+	if (buff != NULL) {
+		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+		/* copy the user buff into the respective DMA buff */
+		memcpy(dma_buff->va, buff, buff_size);
+		desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+		/* Update the address values in the desc with the pa value
+		 * for respective buffer
+		 */
+		desc_on_ring->params.external.addr_high =
+				cpu_to_le32(upper_32_bits(dma_buff->pa));
+		desc_on_ring->params.external.addr_low =
+				cpu_to_le32(lower_32_bits(dma_buff->pa));
+	}
+
+	/* bump the tail */
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+	(hw->aq.asq.next_to_use)++;
+	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+		hw->aq.asq.next_to_use = 0;
+	if (!details->postpone)
+		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+	/* if cmd_details are not defined or async flag is not set,
+	 * we need to wait for desc write back
+	 */
+	if (!details->async && !details->postpone) {
+		u32 total_delay = 0;
+		u32 delay_len = 10;
+
+		do {
+			/* AQ designers suggest use of head for better
+			 * timing reliability than DD bit
+			 */
+			if (i40e_asq_done(hw))
+				break;
+			/* ugh! delay while spin_lock */
+			udelay(delay_len);
+			total_delay += delay_len;
+		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+	}
+
+	/* if ready, copy the desc back to temp */
+	if (i40e_asq_done(hw)) {
+		memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
+		if (buff != NULL)
+			memcpy(buff, dma_buff->va, buff_size);
+		retval = le16_to_cpu(desc->retval);
+		if (retval != 0) {
+			i40e_debug(hw,
+				   I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Command completed with error 0x%X.\n",
+				   retval);
+			/* strip off FW internal code */
+			retval &= 0xff;
+		}
+		cmd_completed = true;
+		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+			status = 0;
+		else
+			status = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+	}
+
+	/* update the error if a timeout occurred */
+	if ((!cmd_completed) &&
+	    (!details->async && !details->postpone)) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Writeback timeout.\n");
+		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+	}
+
+asq_send_command_error:
+	mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+	return status;
+}
+
+/**
+ *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ *  @desc:     pointer to the temp descriptor (non DMA mem)
+ *  @opcode:   the opcode can be used to decide which flags to turn off or on
+ *
+ *  Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode)
+{
+	/* zero out the desc */
+	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+}
+
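+/* As a sketch (hypothetical call site; opcode chosen only for illustration),
+ * a direct command is typically prepared and sent as:
+ *
+ *	struct i40e_aq_desc desc;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */
+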
+/**
+ *  i40e_clean_arq_element - clean one element from the Admin Receive Queue
+ *  @hw: pointer to the hw struct
+ *  @e: event info from the receive descriptor, includes any buffers
+ *  @pending: number of events that could be left to process
+ *
+ *  This function cleans one Admin Receive Queue element and returns
+ *  the contents through e.  It can also return how many events are
+ *  left to process through 'pending'
+ **/
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *pending)
+{
+	i40e_status ret_code = 0;
+	u16 ntc = hw->aq.arq.next_to_clean;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	u16 desc_idx;
+	u16 datalen;
+	u16 flags;
+	u16 ntu;
+
+	/* take the lock before we start messing with the ring */
+	mutex_lock(&hw->aq.arq_mutex);
+
+	/* set next_to_use to head */
+	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	if (ntu == ntc) {
+		/* nothing to do - shouldn't need to update ring's values */
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Queue is empty.\n");
+		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+		goto clean_arq_element_out;
+	}
+
+	/* now clean the next descriptor */
+	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+	desc_idx = ntc;
+	i40e_debug_aq(hw,
+		      I40E_DEBUG_AQ_COMMAND,
+		      (void *)desc,
+		      hw->aq.arq.r.arq_bi[desc_idx].va);
+
+	flags = le16_to_cpu(desc->flags);
+	if (flags & I40E_AQ_FLAG_ERR) {
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.arq_last_status =
+			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Event received with error 0x%X.\n",
+			   hw->aq.arq_last_status);
+	} else {
+		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+		datalen = le16_to_cpu(desc->datalen);
+		e->msg_size = min(datalen, e->msg_size);
+		if (e->msg_buf != NULL && (e->msg_size != 0))
+			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+			       e->msg_size);
+	}
+
+	/* Restore the original datalen and buffer address in the desc,
+	 * since the FW updates datalen to indicate the event message
+	 * size
+	 */
+	bi = &hw->aq.arq.r.arq_bi[ntc];
+	desc->datalen = cpu_to_le16((u16)bi->size);
+	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+	/* set tail = the last cleaned desc index. */
+	wr32(hw, hw->aq.arq.tail, ntc);
+	/* ntc is updated to tail + 1 */
+	ntc++;
+	if (ntc == hw->aq.num_arq_entries)
+		ntc = 0;
+	hw->aq.arq.next_to_clean = ntc;
+	hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+	/* Set pending if needed, unlock and return */
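+	/* the wrap-aware count below is the number of events still awaiting
+	 * processing between next_to_clean and the FW head
+	 */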
+	if (pending != NULL)
+		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+	mutex_unlock(&hw->aq.arq_mutex);
+
+	return ret_code;
+}
+
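+/**
+ *  i40e_resume_aq - resume AQ processing after a reset
+ *  @hw:     pointer to the hardware structure
+ **/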
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+	u32 reg = 0;
+
+	/* Registers are reset after PF reset */
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+
+	i40e_config_asq_regs(hw);
+	reg = hw->aq.num_asq_entries;
+
+	if (hw->mac.type == I40E_MAC_VF) {
+		reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
+		wr32(hw, I40E_VF_ATQLEN1, reg);
+	} else {
+		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
+		wr32(hw, I40E_PF_ATQLEN, reg);
+	}
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+
+	i40e_config_arq_regs(hw);
+	reg = hw->aq.num_arq_entries;
+
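+	/* the ATQLEN enable masks are reused for the ARQ registers below, on
+	 * the assumption that the enable bit sits at the same position in
+	 * ATQLEN and ARQLEN
+	 */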
+	if (hw->mac.type == I40E_MAC_VF) {
+		reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
+		wr32(hw, I40E_VF_ARQLEN1, reg);
+	} else {
+		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
+		wr32(hw, I40E_PF_ARQLEN, reg);
+	}
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
new file mode 100644
index 0000000..22e5ed6
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i)   \
+	(&(((struct i40e_aq_desc *)((R).desc))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+	void *desc;		/* Descriptor ring memory */
+	void *details;		/* ASQ details */
+
+	union {
+		struct i40e_dma_mem *asq_bi;
+		struct i40e_dma_mem *arq_bi;
+	} r;
+
+	u64 dma_addr;		/* Physical address of the ring */
+	u16 count;		/* Number of descriptors */
+	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
+
+	/* used for interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	/* used for queue tracking */
+	u32 head;
+	u32 tail;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+	void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+	u64 cookie;
+	u16 flags_ena;
+	u16 flags_dis;
+	bool async;
+	bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i)   \
+	(&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+	struct i40e_aq_desc desc;
+	u16 msg_size;
+	u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+	struct i40e_adminq_ring arq;    /* receive queue */
+	struct i40e_adminq_ring asq;    /* send queue */
+	u16 num_arq_entries;            /* receive queue depth */
+	u16 num_asq_entries;            /* send queue depth */
+	u16 arq_buf_size;               /* receive queue buffer size */
+	u16 asq_buf_size;               /* send queue buffer size */
+	u16 fw_maj_ver;                 /* firmware major version */
+	u16 fw_min_ver;                 /* firmware minor version */
+	u16 api_maj_ver;                /* api major version */
+	u16 api_min_ver;                /* api minor version */
+
+	struct mutex asq_mutex; /* Send queue lock */
+	struct mutex arq_mutex; /* Receive queue lock */
+
+	struct i40e_dma_mem asq_mem;    /* send queue dynamic memory */
+	struct i40e_dma_mem arq_mem;    /* receive queue dynamic memory */
+
+	/* last status values on send and receive queues */
+	enum i40e_admin_queue_err asq_last_status;
+	enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF	512
+#define I40E_ASQ_CMD_TIMEOUT	100000  /* usecs */
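+/* at the 10 usec poll interval used in i40e_asq_send_command, this allows
+ * up to 10,000 polls (100 ms)
+ */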
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
new file mode 100644
index 0000000..e61ebdd
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR  0x0001
+#define I40E_FW_API_VERSION_MINOR  0x0000
+
+struct i40e_aq_desc {
+	__le16 flags;
+	__le16 opcode;
+	__le16 datalen;
+	__le16 retval;
+	__le32 cookie_high;
+	__le32 cookie_low;
+	union {
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 param2;
+			__le32 param3;
+		} internal;
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 addr_high;
+			__le32 addr_low;
+		} external;
+		u8 raw[16];
+	} params;
+};
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT  0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT  9
+#define I40E_AQ_FLAG_RD_SHIFT  10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT  13
+#define I40E_AQ_FLAG_EI_SHIFT  14
+#define I40E_AQ_FLAG_FE_SHIFT  15
+
+#define I40E_AQ_FLAG_DD  (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB  (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD  (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI  (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI  (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE  (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+
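+/* As a sketch of typical (driver-side, not mandated here) flag usage:
+ * i40e_fill_default_direct_cmd_desc() starts every command with SI and EI
+ * set, and indirect commands additionally set BUF, plus RD when the
+ * attached buffer carries data from the driver to the firmware.
+ */
+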
+/* error codes */
+enum i40e_admin_queue_err {
+	I40E_AQ_RC_OK       = 0,    /* success */
+	I40E_AQ_RC_EPERM    = 1,    /* Operation not permitted */
+	I40E_AQ_RC_ENOENT   = 2,    /* No such element */
+	I40E_AQ_RC_ESRCH    = 3,    /* Bad opcode */
+	I40E_AQ_RC_EINTR    = 4,    /* operation interrupted */
+	I40E_AQ_RC_EIO      = 5,    /* I/O error */
+	I40E_AQ_RC_ENXIO    = 6,    /* No such resource */
+	I40E_AQ_RC_E2BIG    = 7,    /* Arg too long */
+	I40E_AQ_RC_EAGAIN   = 8,    /* Try again */
+	I40E_AQ_RC_ENOMEM   = 9,    /* Out of memory */
+	I40E_AQ_RC_EACCES   = 10,   /* Permission denied */
+	I40E_AQ_RC_EFAULT   = 11,   /* Bad address */
+	I40E_AQ_RC_EBUSY    = 12,   /* Device or resource busy */
+	I40E_AQ_RC_EEXIST   = 13,   /* object already exists */
+	I40E_AQ_RC_EINVAL   = 14,   /* Invalid argument */
+	I40E_AQ_RC_ENOTTY   = 15,   /* Not a typewriter */
+	I40E_AQ_RC_ENOSPC   = 16,   /* No space left or alloc failure */
+	I40E_AQ_RC_ENOSYS   = 17,   /* Function not implemented */
+	I40E_AQ_RC_ERANGE   = 18,   /* Parameter out of range */
+	I40E_AQ_RC_EFLUSHED = 19,   /* Cmd flushed because of prev cmd error */
+	I40E_AQ_RC_BAD_ADDR = 20,   /* Descriptor contains a bad pointer */
+	I40E_AQ_RC_EMODE    = 21,   /* Op not allowed in current dev mode */
+	I40E_AQ_RC_EFBIG    = 22,   /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+	/* aq commands */
+	i40e_aqc_opc_get_version      = 0x0001,
+	i40e_aqc_opc_driver_version   = 0x0002,
+	i40e_aqc_opc_queue_shutdown   = 0x0003,
+
+	/* resource ownership */
+	i40e_aqc_opc_request_resource = 0x0008,
+	i40e_aqc_opc_release_resource = 0x0009,
+
+	i40e_aqc_opc_list_func_capabilities = 0x000A,
+	i40e_aqc_opc_list_dev_capabilities  = 0x000B,
+
+	i40e_aqc_opc_set_cppm_configuration = 0x0103,
+	i40e_aqc_opc_set_arp_proxy_entry    = 0x0104,
+	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,
+
+	/* LAA */
+	i40e_aqc_opc_mng_laa                = 0x0106,
+	i40e_aqc_opc_mac_address_read       = 0x0107,
+	i40e_aqc_opc_mac_address_write      = 0x0108,
+
+	/* internal switch commands */
+	i40e_aqc_opc_get_switch_config         = 0x0200,
+	i40e_aqc_opc_add_statistics            = 0x0201,
+	i40e_aqc_opc_remove_statistics         = 0x0202,
+	i40e_aqc_opc_set_port_parameters       = 0x0203,
+	i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+	i40e_aqc_opc_add_vsi                = 0x0210,
+	i40e_aqc_opc_update_vsi_parameters  = 0x0211,
+	i40e_aqc_opc_get_vsi_parameters     = 0x0212,
+
+	i40e_aqc_opc_add_pv                = 0x0220,
+	i40e_aqc_opc_update_pv_parameters  = 0x0221,
+	i40e_aqc_opc_get_pv_parameters     = 0x0222,
+
+	i40e_aqc_opc_add_veb               = 0x0230,
+	i40e_aqc_opc_update_veb_parameters = 0x0231,
+	i40e_aqc_opc_get_veb_parameters    = 0x0232,
+
+	i40e_aqc_opc_delete_element  = 0x0243,
+
+	i40e_aqc_opc_add_macvlan                  = 0x0250,
+	i40e_aqc_opc_remove_macvlan               = 0x0251,
+	i40e_aqc_opc_add_vlan                     = 0x0252,
+	i40e_aqc_opc_remove_vlan                  = 0x0253,
+	i40e_aqc_opc_set_vsi_promiscuous_modes    = 0x0254,
+	i40e_aqc_opc_add_tag                      = 0x0255,
+	i40e_aqc_opc_remove_tag                   = 0x0256,
+	i40e_aqc_opc_add_multicast_etag           = 0x0257,
+	i40e_aqc_opc_remove_multicast_etag        = 0x0258,
+	i40e_aqc_opc_update_tag                   = 0x0259,
+	i40e_aqc_opc_add_control_packet_filter    = 0x025A,
+	i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+	i40e_aqc_opc_add_cloud_filters            = 0x025C,
+	i40e_aqc_opc_remove_cloud_filters         = 0x025D,
+
+	i40e_aqc_opc_add_mirror_rule    = 0x0260,
+	i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+	i40e_aqc_opc_set_storm_control_config = 0x0280,
+	i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+	/* DCB commands */
+	i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+	i40e_aqc_opc_dcb_updated    = 0x0302,
+
+	/* TX scheduler */
+	i40e_aqc_opc_configure_vsi_bw_limit            = 0x0400,
+	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit    = 0x0406,
+	i40e_aqc_opc_configure_vsi_tc_bw               = 0x0407,
+	i40e_aqc_opc_query_vsi_bw_config               = 0x0408,
+	i40e_aqc_opc_query_vsi_ets_sla_config          = 0x040A,
+	i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+	i40e_aqc_opc_enable_switching_comp_ets             = 0x0413,
+	i40e_aqc_opc_modify_switching_comp_ets             = 0x0414,
+	i40e_aqc_opc_disable_switching_comp_ets            = 0x0415,
+	i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+	i40e_aqc_opc_configure_switching_comp_bw_config    = 0x0417,
+	i40e_aqc_opc_query_switching_comp_ets_config       = 0x0418,
+	i40e_aqc_opc_query_port_ets_config                 = 0x0419,
+	i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
+	i40e_aqc_opc_suspend_port_tx                       = 0x041B,
+	i40e_aqc_opc_resume_port_tx                        = 0x041C,
+
+	/* hmc */
+	i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+	i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
+
+	/* phy commands*/
+	i40e_aqc_opc_get_phy_abilities   = 0x0600,
+	i40e_aqc_opc_set_phy_config      = 0x0601,
+	i40e_aqc_opc_set_mac_config      = 0x0603,
+	i40e_aqc_opc_set_link_restart_an = 0x0605,
+	i40e_aqc_opc_get_link_status     = 0x0607,
+	i40e_aqc_opc_set_phy_int_mask    = 0x0613,
+	i40e_aqc_opc_get_local_advt_reg  = 0x0614,
+	i40e_aqc_opc_set_local_advt_reg  = 0x0615,
+	i40e_aqc_opc_get_partner_advt    = 0x0616,
+	i40e_aqc_opc_set_lb_modes        = 0x0618,
+	i40e_aqc_opc_get_phy_wol_caps    = 0x0621,
+	i40e_aqc_opc_set_phy_reset       = 0x0622,
+	i40e_aqc_opc_upload_ext_phy_fm   = 0x0625,
+
+	/* NVM commands */
+	i40e_aqc_opc_nvm_read   = 0x0701,
+	i40e_aqc_opc_nvm_erase  = 0x0702,
+	i40e_aqc_opc_nvm_update = 0x0703,
+
+	/* virtualization commands */
+	i40e_aqc_opc_send_msg_to_pf   = 0x0801,
+	i40e_aqc_opc_send_msg_to_vf   = 0x0802,
+	i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+	/* alternate structure */
+	i40e_aqc_opc_alternate_write          = 0x0900,
+	i40e_aqc_opc_alternate_write_indirect = 0x0901,
+	i40e_aqc_opc_alternate_read           = 0x0902,
+	i40e_aqc_opc_alternate_read_indirect  = 0x0903,
+	i40e_aqc_opc_alternate_write_done     = 0x0904,
+	i40e_aqc_opc_alternate_set_mode       = 0x0905,
+	i40e_aqc_opc_alternate_clear_port     = 0x0906,
+
+	/* LLDP commands */
+	i40e_aqc_opc_lldp_get_mib    = 0x0A00,
+	i40e_aqc_opc_lldp_update_mib = 0x0A01,
+	i40e_aqc_opc_lldp_add_tlv    = 0x0A02,
+	i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+	i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+	i40e_aqc_opc_lldp_stop       = 0x0A05,
+	i40e_aqc_opc_lldp_start      = 0x0A06,
+
+	/* Tunnel commands */
+	i40e_aqc_opc_add_udp_tunnel       = 0x0B00,
+	i40e_aqc_opc_del_udp_tunnel       = 0x0B01,
+	i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+	/* Async Events */
+	i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+	/* OEM commands */
+	i40e_aqc_opc_oem_parameter_change     = 0xFE00,
+	i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+	/* debug commands */
+	i40e_aqc_opc_debug_get_deviceid     = 0xFF00,
+	i40e_aqc_opc_debug_set_mode         = 0xFF01,
+	i40e_aqc_opc_debug_read_reg         = 0xFF03,
+	i40e_aqc_opc_debug_write_reg        = 0xFF04,
+	i40e_aqc_opc_debug_read_reg_sg      = 0xFF05,
+	i40e_aqc_opc_debug_write_reg_sg     = 0xFF06,
+	i40e_aqc_opc_debug_modify_reg       = 0xFF07,
+	i40e_aqc_opc_debug_dump_internals   = 0xFF08,
+	i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (structures used for both directions use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+
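+/* For example, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) expands to an
+ * enum whose initializer is
+ * (16)/((sizeof(struct i40e_aqc_get_version) == 16) ? 1 : 0), giving a
+ * compile-time divide-by-zero if the struct is not 16 bytes.
+ */
+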
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+	__le32 rom_ver;
+	__le32 fw_build;
+	__le16 fw_major;
+	__le16 fw_minor;
+	__le16 api_major;
+	__le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (direct 0x0002) */
+struct i40e_aqc_driver_version {
+	u8     driver_major_ver;
+	u8     driver_minor_ver;
+	u8     driver_build_ver;
+	u8     driver_subbuild_ver;
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+	__le32     driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING    0x1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM               1
+#define I40E_AQ_RESOURCE_SDP               2
+#define I40E_AQ_RESOURCE_ACCESS_READ       1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE      2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT  3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+	__le16 resource_id;
+	__le16 access_type;
+	__le32 timeout;
+	__le32 resource_number;
+	u8     reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+	u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN     1
+	u8 pf_index;
+	u8 reserved[2];
+	__le32 count;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+	__le16 id;
+	u8     major_rev;
+	u8     minor_rev;
+	__le32 number;
+	__le32 logical_id;
+	__le32 phys_id;
+	u8     reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE      0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE         0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE      0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP       0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID  0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM    0x0006
+#define I40E_AQ_CAP_ID_SRIOV            0x0012
+#define I40E_AQ_CAP_ID_VF               0x0013
+#define I40E_AQ_CAP_ID_VMDQ             0x0014
+#define I40E_AQ_CAP_ID_8021QBG          0x0015
+#define I40E_AQ_CAP_ID_8021QBR          0x0016
+#define I40E_AQ_CAP_ID_VSI              0x0017
+#define I40E_AQ_CAP_ID_DCB              0x0018
+#define I40E_AQ_CAP_ID_FCOE             0x0021
+#define I40E_AQ_CAP_ID_RSS              0x0040
+#define I40E_AQ_CAP_ID_RXQ              0x0041
+#define I40E_AQ_CAP_ID_TXQ              0x0042
+#define I40E_AQ_CAP_ID_MSIX             0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX          0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR    0x0045
+#define I40E_AQ_CAP_ID_1588             0x0046
+#define I40E_AQ_CAP_ID_IWARP            0x0051
+#define I40E_AQ_CAP_ID_LED              0x0061
+#define I40E_AQ_CAP_ID_SDP              0x0062
+#define I40E_AQ_CAP_ID_MDIO             0x0063
+#define I40E_AQ_CAP_ID_FLEX10           0x00F1
+#define I40E_AQ_CAP_ID_CEM              0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+	__le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC    0x0800
+#define I40E_AQ_CPPM_EN_DMCTH   0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX  0x2000
+#define I40E_AQ_CPPM_EN_HPTC    0x4000
+#define I40E_AQ_CPPM_EN_DMARC   0x8000
+	__le16 ttlx;
+	__le32 dmacr;
+	__le16 dmcth;
+	u8     hptc;
+	u8     reserved;
+	__le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+	__le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4           0x0008
+#define I40E_AQ_ARP_UNSUP_CTL           0x0010
+#define I40E_AQ_ARP_ENA                 0x0020
+#define I40E_AQ_ARP_ADD_IPV4            0x0040
+#define I40E_AQ_ARP_DEL_IPV4            0x0080
+	__le16 table_id;
+	__le32 pfpm_proxyfc;
+	__le32 ip_addr;
+	u8     mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+	__le16 table_idx_mac_addr_0;
+	__le16 table_idx_mac_addr_1;
+	__le16 table_idx_ipv6_0;
+	__le16 table_idx_ipv6_1;
+	__le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0             0x0100
+#define I40E_AQ_NS_PROXY_DEL_0             0x0200
+#define I40E_AQ_NS_PROXY_ADD_1             0x0400
+#define I40E_AQ_NS_PROXY_DEL_1             0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0        0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0        0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1        0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1        0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ       0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL     0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL      0x0004
+	u8     mac_addr_0[6];
+	u8     mac_addr_1[6];
+	u8     local_mac_addr[6];
+	u8     ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+	u8     ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+	__le16	command_flags;
+#define I40E_AQ_LAA_FLAG_WR   0x8000
+	u8     reserved[2];
+	__le32 sal;
+	__le16 sah;
+	u8     reserved2[6];
+};
+
+/* Manage MAC Address Read Command (0x0107) */
+struct i40e_aqc_mac_address_read {
+	__le16	command_flags;
+#define I40E_AQC_LAN_ADDR_VALID   0x10
+#define I40E_AQC_SAN_ADDR_VALID   0x20
+#define I40E_AQC_PORT_ADDR_VALID  0x40
+#define I40E_AQC_WOL_ADDR_VALID   0x80
+#define I40E_AQC_ADDR_VALID_MASK  0xf0
+	u8     reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+	u8 pf_lan_mac[6];
+	u8 pf_san_mac[6];
+	u8 port_mac[6];
+	u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+	__le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY    0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL     0x4000
+#define I40E_AQC_WRITE_TYPE_PORT        0x8000
+#define I40E_AQC_WRITE_TYPE_MASK        0xc000
+	__le16 mac_sah;
+	__le32 mac_sal;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+	__le16 seid;
+	u8     reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+	__le16 num_reported;
+	__le16 num_total;
+	u8     reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+	u8     element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC        1
+#define I40E_AQ_SW_ELEM_TYPE_PF         2
+#define I40E_AQ_SW_ELEM_TYPE_VF         3
+#define I40E_AQ_SW_ELEM_TYPE_EMP        4
+#define I40E_AQ_SW_ELEM_TYPE_BMC        5
+#define I40E_AQ_SW_ELEM_TYPE_PV         16
+#define I40E_AQ_SW_ELEM_TYPE_VEB        17
+#define I40E_AQ_SW_ELEM_TYPE_PA         18
+#define I40E_AQ_SW_ELEM_TYPE_VSI        19
+	u8     revision;
+#define I40E_AQ_SW_ELEM_REV_1           1
+	__le16 seid;
+	__le16 uplink_seid;
+	__le16 downlink_seid;
+	u8     reserved[3];
+	u8     connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR       0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT       0x2
+#define I40E_AQ_CONN_TYPE_CASCADED      0x3
+	__le16 scheduler_id;
+	__le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ *    an array of elements is returned in the response buffer;
+ *    the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+	struct i40e_aqc_get_switch_config_header_resp header;
+	struct i40e_aqc_switch_config_element_resp    element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+	__le16 seid;
+	__le16 vlan;
+	__le16 stat_index;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+	__le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS   1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS  2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA    4
+	__le16 bad_frame_vsi;
+	__le16 default_seid;        /* reserved for command */
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+	u8     num_entries;         /* reserved for command */
+	u8     reserved[7];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+	u8     resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB                 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI                 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR             0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG                0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG                0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH      0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH        0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN                0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY      0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY     0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL      0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE         0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS          0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS        0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS   0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS          0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS         0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS            0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS        0x13
+	u8     reserved1;
+	__le16 guaranteed;
+	__le16 total;
+	__le16 used;
+	__le16 total_unalloced;
+	u8     reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ *    this indirect command uses struct i40e_aqc_vsi_properties_data
+ *    as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211) and Get VSI (indirect 0x0212)
+ *    use the generic i40e_aqc_switch_seid descriptor format
+ *    use the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+	__le16 uplink_seid;
+	u8     connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL            0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT           0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED          0x3
+	u8     reserved1;
+	u8     vf_id;
+	u8     reserved2;
+	__le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT          0x0
+#define I40E_AQ_VSI_TYPE_MASK           (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF             0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2          0x1
+#define I40E_AQ_VSI_TYPE_PF             0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI      0x8
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+	__le16 seid;
+	__le16 vsi_number;
+	__le16 vsi_used;
+	__le16 vsi_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+	__le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID       0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID     0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID         0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID       0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID   0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID    0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID    0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID    0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID     0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID        0x0200
+	/* switch section */
+	__le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT             0x0000
+#define I40E_AQ_VSI_SW_ID_MASK              (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG     0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB     0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB     0x4000
+	u8     sw_reserved[2];
+	/* security section */
+	u8     sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD    0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK    0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK     0x04
+	u8     sec_reserved;
+	/* VLAN section */
+	__le16 pvid; /* VLANs include priority bits */
+	__le16 fcoe_pvid;
+	u8     port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT        0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK         (0x03 << \
+						I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED       0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED     0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL          0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID       0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT        0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK         (0x3 << \
+					I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH     0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP       0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR          0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING      0x18
+	u8     pvlan_reserved[3];
+	/* ingress egress up sections */
+	__le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT      0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT      3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT      6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT      9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT      12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT      15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT      18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT      21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+	__le32 egress_table;   /* same defines as for ingress table */
+	/* cascaded PV section */
+	__le16 cas_pv_tag;
+	u8     cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT      0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK       (0x03 << \
+						I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE      0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE     0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY       0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG      0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE      0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+	u8     cas_pv_reserved;
+	/* queue mapping section */
+	__le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG          0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG       0x1
+	__le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT             0x0
+#define I40E_AQ_VSI_QUEUE_MASK              (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+	__le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT     0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK      (0x1FF << \
+						I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT     9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK      (0x7 << \
+						I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+	/* queueing option section */
+	u8     queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA         0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA        0x20
+	u8     queueing_opt_reserved[3];
+	/* scheduler section */
+	u8     up_enable_bits;
+	u8     sched_reserved;
+	/* outer up section */
+	__le32 outer_up_table; /* same structure and defines as ingress table */
+	u8     cmd_reserved[8];
+	/* last 32 bytes are written by FW */
+	__le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
+	__le16 stat_counter_idx;
+	__le16 sched_id;
+	u8     resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
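+/* the 96 SW-written bytes and 32 FW-written bytes above account for the
+ * 128-byte check
+ */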
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+	__le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE                0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN    0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN    0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT           0x8
+	__le16 uplink_seid;
+	__le16 connected_seid;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+	/* reserved for update; for add also encodes error if rc == ENOSPC */
+	__le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV               0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED            0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER          0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY            0x8
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+	__le16 seid;
+	__le16 default_stag;
+	__le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE            0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG  0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG  0x4
+	u8     reserved[8];
+	__le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+	__le16 uplink_seid;
+	__le16 downlink_seid;
+	__le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING           0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT    1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK     (0x3 << \
+					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT  0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA     0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER   0x8
+	u8     enable_tcs;
+	u8     reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+	u8     reserved[6];
+	__le16 switch_seid;
+	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+	__le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB              0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED            0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER          0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY            0x8
+	__le16 statistic_index;
+	__le16 vebs_used;
+	__le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+	__le16 seid;
+	__le16 switch_id;
+	__le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+	__le16 statistic_index;
+	__le16 vebs_used;
+	__le16 vebs_free;
+	u8     reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+	__le16 num_addresses;
+	__le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID      0x8000
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+	u8     mac_addr[6];
+	__le16 vlan_tag;
+	__le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH     0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH        0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
+	__le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT  0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK   (0x7FF << \
+					I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+	/* response section */
+	u8     match_method;
+#define I40E_AQC_MM_PERFECT_MATCH             0x01
+#define I40E_AQC_MM_HASH_MATCH                0x02
+#define I40E_AQC_MM_ERR_NO_RES                0xFF
+	u8     reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+	__le16 perfect_mac_used;
+	__le16 perfect_mac_free;
+	__le16 unicast_hash_free;
+	__le16 multicast_hash_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+	u8     mac_addr[6];
+	__le16 vlan_tag;
+	u8     flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH      0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH         0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN        0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS           0x10
+	u8     reserved[3];
+	/* reply section */
+	u8     error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS         0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL            0xFF
+	u8     reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+	__le16 vlan_tag;
+	u8     vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL             0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT       1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK        (0x3 << \
+						I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR     0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY     0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY   0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT           3
+#define I40E_AQC_VLAN_PTYPE_MASK            (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI     0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI     0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI   0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI    0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL            0x1
+	u8     reserved;
+	u8     result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS       0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST  0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS    0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL       0xFF
+	u8     reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+	u8     reserved[4];
+	__le16 vlans_used;
+	__le16 vlans_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+	__le16 promiscuous_flags;
+	__le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST     0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST   0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST   0x04
+#define I40E_AQC_SET_VSI_DEFAULT             0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
+	__le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+	__le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE     0x0001
+	__le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+	__le16 tag;
+	__le16 queue_number;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+	u8     reserved[12];
+	__le16 tags_used;
+	__le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+	__le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+	__le16 tag;
+	u8     reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+	__le16 pv_seid;
+	__le16 etag;
+	u8     num_unicast_etags;
+	u8     reserved[3];
+	__le32 addr_high;          /* address of array of 2-byte s-tags */
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+	u8     reserved[4];
+	__le16 mcast_etags_used;
+	__le16 mcast_etags_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+	__le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+	__le16 old_tag;
+	__le16 new_tag;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+	u8     reserved[12];
+	__le16 tags_used;
+	__le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+	u8     mac[6];
+	__le16 etype;
+	__le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC    0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP          0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE      0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX            0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX            0x0000
+	__le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK   (0x3FF << \
+				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+	__le16 queue;
+	u8     reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+	__le16 mac_etype_used;
+	__le16 etype_used;
+	__le16 mac_etype_free;
+	__le16 etype_free;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+	u8     num_filters;
+	u8     reserved;
+	__le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+	u8     reserved2[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+	u8     outer_mac[6];
+	u8     inner_mac[6];
+	__le16 inner_vlan;
+	union {
+		struct {
+			u8 reserved[12];
+			u8 data[4];
+		} v4;
+		struct {
+			u8 data[16];
+		} v6;
+	} ipaddr;
+	__le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK                  (0x3F << \
+					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP                   0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE               0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN            0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE        0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID           0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL        0x0007
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC                  0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC                  0x000A
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE               0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT                    6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK                     0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                   0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                   0x0100
+	__le32 key_low;
+	__le32 key_high;
+	__le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK                   (0x3F << \
+					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+	u8     reserved[14];
+	/* response section */
+	u8     allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS         0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL            0xFF
+	u8     response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+	__le16 perfect_ovlan_used;
+	__le16 perfect_ovlan_free;
+	__le16 vlan_used;
+	__le16 vlan_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ *       take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+	__le16 seid;
+	__le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT            0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK             (0x7 << \
+						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS    1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS     2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN             3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS      4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS       5
+	__le16 num_entries;
+	__le16 destination;  /* VSI for add, rule id for delete */
+	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+	u8     reserved[2];
+	__le16 rule_id;  /* only used on add */
+	__le16 mirror_rules_used;
+	__le16 mirror_rules_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ *    the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+	__le32 broadcast_threshold;
+	__le32 multicast_threshold;
+	__le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW            0x01
+#define I40E_AQC_STORM_CONTROL_MDICW            0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW            0x04
+#define I40E_AQC_STORM_CONTROL_BDICW            0x08
+#define I40E_AQC_STORM_CONTROL_BIDU             0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT   8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK    (0x3FF << \
+					I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+	u8     reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ *    the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+	u8     tc_bitmap;
+	u8     command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET    0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR  0x0
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+	__le16 vsi_seid;
+	u8     reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+	__le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+	__le16 vsi_seid;
+	u8     reserved[2];
+	__le16 credit;
+	u8     reserved1[2];
+	u8     max_credit; /* 0-3, limit = 2^max */
+	u8     reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
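+/* Illustrative fill-in sketch (not part of the firmware API): per the
+ * field comments above, max_credit encodes a power-of-two scaling of
+ * the credit value (0-3, limit = 2^max), so a caller might set up the
+ * command section as:
+ *
+ *	struct i40e_aqc_configure_vsi_bw_limit *cmd;
+ *
+ *	cmd->vsi_seid = cpu_to_le16(seid);
+ *	cmd->credit = cpu_to_le16(credit);
+ *	cmd->max_credit = max;
+ */
+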
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+	u8     tc_valid_bits;
+	u8     reserved[15];
+	__le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+	u8     reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+	u8     tc_valid_bits;
+	u8     reserved[3];
+	u8     tc_bw_credits[8];
+	u8     reserved1[4];
+	__le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+	u8     tc_valid_bits;
+	u8     tc_suspended_bits;
+	u8     reserved[14];
+	__le16 qs_handles[8];
+	u8     reserved1[4];
+	__le16 port_bw_limit;
+	u8     reserved2[2];
+	u8     max_bw; /* 0-3, limit = 2^max */
+	u8     reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+	u8     tc_valid_bits;
+	u8     reserved[3];
+	u8     share_credits[8];
+	__le16 credits[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+	__le16 seid;
+	u8     reserved[2];
+	__le16 credit;
+	u8     reserved1[2];
+	u8     max_bw; /* 0-3, limit = 2^max */
+	u8     reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable  Physical Port ETS (indirect 0x0413)
+ * Modify  Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+	u8     reserved[4];
+	u8     tc_valid_bits;
+	u8     reserved1;
+	u8     tc_strict_priority_flags;
+	u8     reserved2[17];
+	u8     tc_bw_share_credits[8];
+	u8     reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+	u8     tc_valid_bits;
+	u8     reserved[15];
+	__le16 tc_bw_credit[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+	u8     reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+	u8     tc_valid_bits;
+	u8     reserved[2];
+	u8     absolute_credits; /* bool */
+	u8     tc_bw_share_credits[8];
+	u8     reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+	u8     tc_valid_bits;
+	u8     reserved[35];
+	__le16 port_bw_limit;
+	u8     reserved1[2];
+	u8     tc_bw_max; /* 0-3, limit = 2^max */
+	u8     reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+	u8     reserved[4];
+	u8     tc_valid_bits;
+	u8     reserved1;
+	u8     tc_strict_priority_bits;
+	u8     reserved2;
+	u8     tc_bw_share_credits[8];
+	__le16 tc_bw_limits[8];
+
+	/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+	u8     reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+	u8     tc_valid_bits;
+	u8     reserved[2];
+	u8     absolute_credits_enable; /* bool */
+	u8     tc_bw_share_credits[8];
+	__le16 tc_bw_limits[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+	u8     pm_profile;
+	u8     pe_vf_enabled;
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
+	I40E_HMC_PROFILE_DEFAULT     = 1,
+	I40E_HMC_PROFILE_FAVOR_VF    = 2,
+	I40E_HMC_PROFILE_EQUAL       = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES  0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES     0x0002
+
+enum i40e_aq_phy_type {
+	I40E_PHY_TYPE_SGMII			= 0x0,
+	I40E_PHY_TYPE_1000BASE_KX		= 0x1,
+	I40E_PHY_TYPE_10GBASE_KX4		= 0x2,
+	I40E_PHY_TYPE_10GBASE_KR		= 0x3,
+	I40E_PHY_TYPE_40GBASE_KR4		= 0x4,
+	I40E_PHY_TYPE_XAUI			= 0x5,
+	I40E_PHY_TYPE_XFI			= 0x6,
+	I40E_PHY_TYPE_SFI			= 0x7,
+	I40E_PHY_TYPE_XLAUI			= 0x8,
+	I40E_PHY_TYPE_XLPPI			= 0x9,
+	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,
+	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
+	I40E_PHY_TYPE_100BASE_TX		= 0x11,
+	I40E_PHY_TYPE_1000BASE_T		= 0x12,
+	I40E_PHY_TYPE_10GBASE_T			= 0x13,
+	I40E_PHY_TYPE_10GBASE_SR		= 0x14,
+	I40E_PHY_TYPE_10GBASE_LR		= 0x15,
+	I40E_PHY_TYPE_10GBASE_SFPP_CU		= 0x16,
+	I40E_PHY_TYPE_10GBASE_CR1		= 0x17,
+	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,
+	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,
+	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A,
+	I40E_PHY_TYPE_20GBASE_KR2		= 0x1B,
+	I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT	0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT	0x2
+#define I40E_LINK_SPEED_10GB_SHIFT	0x3
+#define I40E_LINK_SPEED_40GB_SHIFT	0x4
+#define I40E_LINK_SPEED_20GB_SHIFT	0x5
+
+enum i40e_aq_link_speed {
+	I40E_LINK_SPEED_UNKNOWN	= 0,
+	I40E_LINK_SPEED_100MB	= (1 << I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_10GB	= (1 << I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= (1 << I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
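+/* Minimal helper sketch (illustrative, not part of the driver API):
+ * the enum values above are single bits, so a speed bitmap such as the
+ * one in the PHY abilities or link status responses can be tested
+ * directly against them.
+ */
+static inline bool i40e_example_link_speed_set(u8 speed_bitmap,
+					       enum i40e_aq_link_speed speed)
+{
+	/* each enum value is a single bit, e.g. 1 << ..._SHIFT */
+	return (speed_bitmap & (u8)speed) != 0;
+}
+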
+struct i40e_aqc_module_desc {
+	u8 oui[3];
+	u8 reserved1;
+	u8 part_number[16];
+	u8 revision[4];
+	u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+	__le32 phy_type;       /* bitmap using the above enum for offsets */
+	u8     link_speed;     /* bitmap using the above enum */
+	u8     abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER        0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT         3
+#define I40E_AQ_PHY_FLAG_AN_MASK          (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF           0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON            0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20
+	__le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX       0x0002
+#define I40E_AQ_EEE_1000BASE_T       0x0004
+#define I40E_AQ_EEE_10GBASE_T        0x0008
+#define I40E_AQ_EEE_1000BASE_KX      0x0010
+#define I40E_AQ_EEE_10GBASE_KX4      0x0020
+#define I40E_AQ_EEE_10GBASE_KR       0x0040
+	__le32 eeer_val;
+	u8     d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA  0x01
+	u8     reserved[3];
+	u8     phy_id[4];
+	u8     module_type[3];
+	u8     qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS          16
+	struct i40e_aqc_module_desc  qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+	__le32 phy_type;
+	u8     link_speed;
+	u8     abilities;
+	__le16 eee_capability;
+	__le32 eeer;
+	u8     low_power_ctrl;
+	u8     reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
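+/* Usage sketch (illustrative): since this structure shares its bit
+ * layout with i40e_aq_get_phy_abilities_resp, a common pattern is to
+ * read the current abilities and echo selected fields back:
+ *
+ *	struct i40e_aq_get_phy_abilities_resp abilities;
+ *	struct i40e_aq_set_phy_config config = {};
+ *
+ *	config.phy_type = abilities.phy_type;
+ *	config.link_speed = abilities.link_speed;
+ *	config.abilities = abilities.abilities;
+ *	config.eee_capability = abilities.eee_capability;
+ */
+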
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+	__le16 max_frame_size;
+	u8     params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN           0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK      0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT     3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE      0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX   0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX   0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX   0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX   0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX   0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX   0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX   0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX   0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX   0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX   0x1
+	u8     tx_timer_priority; /* bitmap */
+	__le16 tx_timer_value;
+	__le16 fc_refresh_threshold;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+	u8     command;
+#define I40E_AQ_PHY_RESTART_AN  0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+	__le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK             0x3
+#define I40E_AQ_LSE_NOP              0x0
+#define I40E_AQ_LSE_DISABLE          0x2
+#define I40E_AQ_LSE_ENABLE           0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED       0x1
+	u8     phy_type;    /* i40e_aq_phy_type   */
+	u8     link_speed;  /* i40e_aq_link_speed */
+	u8     link_info;
+#define I40E_AQ_LINK_UP              0x01
+#define I40E_AQ_LINK_FAULT           0x02
+#define I40E_AQ_LINK_FAULT_TX        0x04
+#define I40E_AQ_LINK_FAULT_RX        0x08
+#define I40E_AQ_LINK_FAULT_REMOTE    0x10
+#define I40E_AQ_MEDIA_AVAILABLE      0x40
+#define I40E_AQ_SIGNAL_DETECT        0x80
+	u8     an_info;
+#define I40E_AQ_AN_COMPLETED         0x01
+#define I40E_AQ_LP_AN_ABILITY        0x02
+#define I40E_AQ_PD_FAULT             0x04
+#define I40E_AQ_FEC_EN               0x08
+#define I40E_AQ_PHY_LOW_POWER        0x10
+#define I40E_AQ_LINK_PAUSE_TX        0x20
+#define I40E_AQ_LINK_PAUSE_RX        0x40
+#define I40E_AQ_QUALIFIED_MODULE     0x80
+	u8     ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM  0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT        0x02
+#define I40E_AQ_LINK_TX_MASK         (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE       0x00
+#define I40E_AQ_LINK_TX_DRAINED      0x01
+#define I40E_AQ_LINK_TX_FLUSHED      0x03
+	u8     loopback;         /* use defines from i40e_aqc_set_lb_mode */
+	__le16 max_frame_size;
+	u8     config;
+#define I40E_AQ_CONFIG_CRC_ENA       0x04
+#define I40E_AQ_CONFIG_PACING_MASK   0x78
+	u8     reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
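+/* Decoding sketch (illustrative): the response bytes can be tested
+ * against the flag defines above, for example:
+ *
+ *	bool up = resp->link_info & I40E_AQ_LINK_UP;
+ *	bool an_done = resp->an_info & I40E_AQ_AN_COMPLETED;
+ *	u8 tx_state = (resp->ext_info & I40E_AQ_LINK_TX_MASK) >>
+ *		      I40E_AQ_LINK_TX_SHIFT;
+ */
+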
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+	u8     reserved[8];
+	__le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN       0x0002
+#define I40E_AQ_EVENT_MEDIA_NA          0x0004
+#define I40E_AQ_EVENT_LINK_FAULT        0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM    0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS  0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT     0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED      0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL  0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+	u8     reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+	__le32 local_an_reg0;
+	__le16 local_an_reg1;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+	__le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL   0x01
+#define I40E_AQ_LB_PHY_REMOTE  0x02
+#define I40E_AQ_LB_MAC_LOCAL   0x04
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+	u8     reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST  0x02
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+	I40E_AQC_PHY_REG_INTERNAL         = 0x1,
+	I40E_AQC_PHY_REG_EXTERNAL_BASET   = 0x2,
+	I40E_AQC_PHY_REG_EXTERNAL_MODULE  = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+	u8     command_flags;
+#define I40E_AQ_NVM_LAST_CMD    0x01
+#define I40E_AQ_NVM_FLASH_ONLY  0x80
+	u8     module_pointer;
+	__le16 length;
+	__le32 offset;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
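+/* Fill-in sketch (illustrative assumption, not a firmware contract):
+ * an NVM read of 'len' bytes at byte 'offset' in module 'module' might
+ * prepare the command section as:
+ *
+ *	struct i40e_aqc_nvm_update *cmd;
+ *
+ *	cmd->module_pointer = module;
+ *	cmd->offset = cpu_to_le32(offset);
+ *	cmd->length = cpu_to_le16(len);
+ *	cmd->command_flags = last_cmd ? I40E_AQ_NVM_LAST_CMD : 0;
+ */
+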
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+	__le32 id;
+	u8     reserved[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+	__le32 address0;
+	__le32 data0;
+	__le32 address1;
+	__le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+	__le32 address;
+	__le32 length;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+	__le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK	1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY	0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI	1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED		2
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+	__le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE	0
+#define I40E_AQ_ALTERNATE_MODE_OEM	1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+	__le32 prtdcb_rupto;
+	__le32 otx_ctl;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+	u8     type;
+	u8     reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK                      0x3
+#define I40E_AQ_LLDP_MIB_LOCAL                          0x0
+#define I40E_AQ_LLDP_MIB_REMOTE                         0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE               0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK                   0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT                  0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE         0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR               0x1
+#define I40E_AQ_LLDP_TX_SHIFT              0x4
+#define I40E_AQ_LLDP_TX_MASK               (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+	__le16 local_len;
+	__le16 remote_len;
+	u8     reserved2[2];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+	u8     command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE          0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE         0x1
+	u8     reserved[7];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8     reserved1[1];
+	__le16 len;
+	u8     reserved2[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8     reserved;
+	__le16 old_len;
+	__le16 new_offset;
+	__le16 new_len;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+	u8     command;
+#define I40E_AQ_LLDP_AGENT_STOP                 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN             0x1
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+struct i40e_aqc_lldp_start {
+	u8     command;
+#define I40E_AQ_LLDP_AGENT_START                0x1
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+	__le16 udp_port;
+	u8     header_len; /* in DWords, 1 to 15 */
+	u8     protocol_index;
+#define I40E_AQC_TUNNEL_TYPE_MAC    0x0
+#define I40E_AQC_TUNNEL_TYPE_UDP    0x1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
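+/* Usage sketch (illustrative): adding a UDP tunnel port, with the
+ * header length given in DWords (1 to 15) as noted above:
+ *
+ *	struct i40e_aqc_add_udp_tunnel *cmd;
+ *
+ *	cmd->udp_port = cpu_to_le16(port);
+ *	cmd->header_len = hdr_dwords;
+ *	cmd->protocol_index = I40E_AQC_TUNNEL_TYPE_UDP;
+ */
+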
+/* Remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+	u8     reserved[2];
+	u8     index; /* 0 to 15 */
+	u8     pf_filters;
+	u8     total_filters;
+	u8     reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+	__le16 udp_port;
+	u8     index; /* 0 to 15 */
+	u8     multiple_entries;
+	u8     tunnels_used;
+	u8     reserved;
+	u8     tunnels_free;
+	u8     reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+struct i40e_aqc_tunnel_key_structure {
+	__le16     key1_off;
+	__le16     key1_len;
+	__le16     key2_off;
+	__le16     key2_len;
+	__le16     flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+	u8         reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+	__le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL   0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL   1
+#define I40E_AQ_OEM_PARAM_MAC           2
+	__le32 param_value1;
+	u8     param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+	__le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN  0x0
+#define I40E_AQ_OEM_STATE_LINK_UP    0x1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+struct i40e_acq_set_test_mode {
+	u8     mode;
+#define I40E_AQ_TEST_PARTIAL    0
+#define I40E_AQ_TEST_FULL       1
+#define I40E_AQ_TEST_NVM        2
+	u8     reserved[3];
+	u8     command;
+#define I40E_AQ_TEST_OPEN        0
+#define I40E_AQ_TEST_CLOSE       1
+#define I40E_AQ_TEST_INC         2
+	u8     reserved2[3];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+	__le32 reserved;
+	__le32 address;
+	__le32 value_high;
+	__le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read  (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+	__le32 address;
+	__le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+	__le32 address;
+	__le32 value;
+	__le32 clear_mask;
+	__le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
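+/* Semantics sketch (our reading of the masks, not stated by the
+ * firmware spec): the modify command behaves like a read-modify-write,
+ *
+ *	new_value = (old_value & ~clear_mask) | set_mask;
+ *
+ * so clear_mask selects bits to drop and set_mask bits to force on.
+ */
+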
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX		0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU	1
+#define I40E_AQ_CLUSTER_ID_TXSCHED	2
+#define I40E_AQ_CLUSTER_ID_HMC		3
+#define I40E_AQ_CLUSTER_ID_MAC0		4
+#define I40E_AQ_CLUSTER_ID_MAC1		5
+#define I40E_AQ_CLUSTER_ID_MAC2		6
+#define I40E_AQ_CLUSTER_ID_MAC3		7
+#define I40E_AQ_CLUSTER_ID_DCB		8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM	9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF	10
+
+struct i40e_aqc_debug_dump_internals {
+	u8     cluster_id;
+	u8     table_id;
+	__le16 data_size;
+	__le32 idx;
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+	u8     cluster_id;
+	u8     cluster_specific_params[7];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
new file mode 100644
index 0000000..3b1cc21
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -0,0 +1,59 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+	i40e_mem_arq_buf = 0,		/* ARQ indirect command buffer */
+	i40e_mem_asq_buf = 1,
+	i40e_mem_atq_buf = 2,		/* ATQ indirect command buffer */
+	i40e_mem_arq_ring = 3,		/* ARQ descriptor ring */
+	i40e_mem_atq_ring = 4,		/* ATQ descriptor ring */
+	i40e_mem_pd = 5,		/* Page Descriptor */
+	i40e_mem_bp = 6,		/* Backing Page - 4KB */
+	i40e_mem_bp_jumbo = 7,		/* Backing Page - > 4KB */
+	i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+					    struct i40e_dma_mem *mem,
+					    enum i40e_memory_type type,
+					    u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+					struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+					     struct i40e_virt_mem *mem,
+					     u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+					 struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
new file mode 100644
index 0000000..c21df7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -0,0 +1,2041 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+	i40e_status status = 0;
+
+	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (hw->device_id) {
+		case I40E_SFP_XL710_DEVICE_ID:
+		case I40E_SFP_X710_DEVICE_ID:
+		case I40E_QEMU_DEVICE_ID:
+		case I40E_KX_A_DEVICE_ID:
+		case I40E_KX_B_DEVICE_ID:
+		case I40E_KX_C_DEVICE_ID:
+		case I40E_KX_D_DEVICE_ID:
+		case I40E_QSFP_A_DEVICE_ID:
+		case I40E_QSFP_B_DEVICE_ID:
+		case I40E_QSFP_C_DEVICE_ID:
+			hw->mac.type = I40E_MAC_XL710;
+			break;
+		case I40E_VF_DEVICE_ID:
+		case I40E_VF_HV_DEVICE_ID:
+			hw->mac.type = I40E_MAC_VF;
+			break;
+		default:
+			hw->mac.type = I40E_MAC_GENERIC;
+			break;
+		}
+	} else {
+		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+		  hw->mac.type, status);
+	return status;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask related to admin queue
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps a debug log about the admin queue command, including the
+ * descriptor contents and, if present, the command buffer.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+		   void *buffer)
+{
+	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+	u8 *aq_buffer = (u8 *)buffer;
+	u32 data[4];
+	u32 i = 0;
+
+	if ((!(mask & hw->debug_mask)) || (desc == NULL))
+		return;
+
+	i40e_debug(hw, mask,
+		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+		   aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+		   aq_desc->retval);
+	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+		   aq_desc->cookie_high, aq_desc->cookie_low);
+	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
+		   aq_desc->params.internal.param0,
+		   aq_desc->params.internal.param1);
+	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
+		   aq_desc->params.external.addr_high,
+		   aq_desc->params.external.addr_low);
+
+	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+		memset(data, 0, sizeof(data));
+		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+		for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+			data[((i % 16) / 4)] |=
+				((u32)aq_buffer[i]) << (8 * (i % 4));
+			if ((i % 16) == 15) {
+				i40e_debug(hw, mask,
+					   "\t0x%04X  %08X %08X %08X %08X\n",
+					   i - 15, data[0], data[1], data[2],
+					   data[3]);
+				memset(data, 0, sizeof(data));
+			}
+		}
+		if ((i % 16) != 0)
+			i40e_debug(hw, mask, "\t0x%04X  %08X %08X %08X %08X\n",
+				   i - (i % 16), data[0], data[1], data[2],
+				   data[3]);
+	}
+}
+
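+/* Example call (illustrative; assumes the I40E_DEBUG_AQ_MESSAGE mask
+ * bit from i40e_type.h): dump a descriptor and its buffer when AQ
+ * message debugging is enabled in hw->debug_mask:
+ *
+ *	i40e_debug_aq(hw, I40E_DEBUG_AQ_MESSAGE, &desc, buffer);
+ *
+ * The buffer is printed 16 bytes per row, packed into four 32-bit
+ * little-endian words.
+ */
+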
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function.  The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+{
+	i40e_status status = 0;
+	u32 reg;
+
+	hw->phy.get_link_info = true;
+
+	/* Determine port number */
+	reg = rd32(hw, I40E_PFGEN_PORTNUM);
+	reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+	       I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+	hw->port = (u8)reg;
+
+	i40e_set_mac_type(hw);
+
+	switch (hw->mac.type) {
+	case I40E_MAC_XL710:
+		break;
+	default:
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	status = i40e_init_nvm(hw);
+	return status;
+}
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+				   u16 *flags,
+				   struct i40e_aqc_mac_address_read_data *addrs,
+				   struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_mac_address_read *cmd_data =
+		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+
+	status = i40e_asq_send_command(hw, &desc, addrs,
+				       sizeof(*addrs), cmd_details);
+	*flags = le16_to_cpu(cmd_data->command_flags);
+
+	return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+				    u16 flags, u8 *mac_addr,
+				    struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_mac_address_write *cmd_data =
+		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_mac_address_write);
+	cmd_data->command_flags = cpu_to_le16(flags);
+	memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
+	memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+	struct i40e_aqc_mac_address_read_data addrs;
+	i40e_status status;
+	u16 flags = 0;
+
+	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+	if (flags & I40E_AQC_LAN_ADDR_VALID)
+		memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+	return status;
+}
+
+/**
+ * i40e_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+{
+	i40e_status status = 0;
+
+	/* Make sure it is not a multicast address */
+	if (I40E_IS_MULTICAST(mac_addr)) {
+		hw_dbg(hw, "MAC address is multicast\n");
+		status = I40E_ERR_INVALID_MAC_ADDR;
+	/* Not a broadcast address */
+	} else if (I40E_IS_BROADCAST(mac_addr)) {
+		hw_dbg(hw, "MAC address is broadcast\n");
+		status = I40E_ERR_INVALID_MAC_ADDR;
+	/* Reject the zero address */
+	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+		hw_dbg(hw, "MAC address is all zeros\n");
+		status = I40E_ERR_INVALID_MAC_ADDR;
+	}
+	return status;
+}
+
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+i40e_status i40e_pf_reset(struct i40e_hw *hw)
+{
+	u32 wait_cnt = 0;
+	u32 reg = 0;
+	u32 grst_del;
+
+	/* Poll for Global Reset steady state in case of recent GRST.
+	 * The grst delay value is in 100ms units, and we'll wait a
+	 * couple counts longer to be sure we don't just miss the end.
+	 */
+	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+	for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+		reg = rd32(hw, I40E_GLGEN_RSTAT);
+		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+			break;
+		msleep(100);
+	}
+	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+		hw_dbg(hw, "Global reset polling failed to complete.\n");
+		return I40E_ERR_RESET_FAILED;
+	}
+
+	/* Determine the PF number based on the PCI fn */
+	hw->pf_id = (u8)hw->bus.func;
+
+	/* If there was a Global Reset in progress when we got here,
+	 * we don't need to do the PF Reset
+	 */
+	if (!wait_cnt) {
+		reg = rd32(hw, I40E_PFGEN_CTRL);
+		wr32(hw, I40E_PFGEN_CTRL,
+		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+		for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+			reg = rd32(hw, I40E_PFGEN_CTRL);
+			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+				break;
+			usleep_range(1000, 2000);
+		}
+		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+			hw_dbg(hw, "PF reset polling failed to complete.\n");
+			return I40E_ERR_RESET_FAILED;
+		}
+	}
+
+	i40e_clear_pxe_mode(hw);
+	return 0;
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+	u32 reg;
+
+	/* Clear single descriptor fetch/write-back mode */
+	reg = rd32(hw, I40E_GLLAN_RCTL_0);
+	wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+}
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+	u32 gpio_val = 0;
+	u32 mode = 0;
+	u32 port;
+	int i;
+
+	for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+		if (!hw->func_caps.led[i])
+			continue;
+
+		gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+		port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+			>> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+		if (port != hw->port)
+			continue;
+
+		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+				>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+		break;
+	}
+
+	return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, else on (see EAS for mode details)
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode)
+{
+	u32 gpio_val = 0;
+	u32 led_mode = 0;
+	u32 port;
+	int i;
+
+	for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+		if (!hw->func_caps.led[i])
+			continue;
+
+		gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+		port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+			>> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+		if (port != hw->port)
+			continue;
+
+		led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+			    I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+		gpio_val |= led_mode;
+		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+	}
+}
+
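+/* Usage sketch (illustrative): per the GPIO mode encoding described
+ * above i40e_led_get(), 0x0 is off and 0xf is steady on, so a simple
+ * identify sequence could save, force on, and then restore the mode:
+ *
+ *	u32 saved = i40e_led_get(hw);
+ *
+ *	i40e_led_set(hw, 0xf);
+ *	(wait)
+ *	i40e_led_set(hw, saved);
+ */
+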
+/* Admin command wrappers */
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_queue_shutdown *cmd =
+		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_queue_shutdown);
+
+	if (unloading)
+		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_link_restart_an *cmd =
+		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_link_restart_an);
+
+	cmd->command = I40E_AQ_PHY_RESTART_AN;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+				bool enable_lse, struct i40e_link_status *link,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_link_status *resp =
+		(struct i40e_aqc_get_link_status *)&desc.params.raw;
+	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+	i40e_status status;
+	u16 command_flags;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+	if (enable_lse)
+		command_flags = I40E_AQ_LSE_ENABLE;
+	else
+		command_flags = I40E_AQ_LSE_DISABLE;
+	resp->command_flags = cpu_to_le16(command_flags);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status)
+		goto aq_get_link_info_exit;
+
+	/* save off old link status information */
+	memcpy(&hw->phy.link_info_old, hw_link_info,
+	       sizeof(struct i40e_link_status));
+
+	/* update link status */
+	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+	hw_link_info->link_info = resp->link_info;
+	hw_link_info->an_info = resp->an_info;
+	hw_link_info->ext_info = resp->ext_info;
+
+	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
+		hw_link_info->lse_enable = true;
+	else
+		hw_link_info->lse_enable = false;
+
+	/* save link status information */
+	if (link)
+		memcpy(link, hw_link_info, sizeof(struct i40e_link_status));
+
+	/* flag cleared so helper functions don't call AQ again */
+	hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+	return status;
+}
+
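+/* Usage sketch (illustrative): enable link status events and check the
+ * reported state in one call:
+ *
+ *	struct i40e_link_status link;
+ *
+ *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
+ *	    (link.link_info & I40E_AQ_LINK_UP))
+ *		(link is up at link.link_speed)
+ */
+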
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_vsi);
+
+	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
+	cmd->connection_type = vsi_ctx->connection_type;
+	cmd->vf_id = vsi_ctx->vf_num;
+	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+				    sizeof(vsi_ctx->info), cmd_details);
+
+	if (status)
+		goto aq_add_vsi_exit;
+
+	vsi_ctx->seid = le16_to_cpu(resp->seid);
+	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_add_vsi_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	i40e_status status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set)
+		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+	cmd->promiscuous_flags = cpu_to_le16(flags);
+
+	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+	cmd->seid = cpu_to_le16(seid);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	i40e_status status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set)
+		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+	cmd->promiscuous_flags = cpu_to_le16(flags);
+
+	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+	cmd->seid = cpu_to_le16(seid);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+				u16 seid, bool set_filter,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set_filter)
+		cmd->promiscuous_flags
+			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	else
+		cmd->promiscuous_flags
+			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	cmd->seid = cpu_to_le16(seid);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
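+/* Usage sketch (illustrative): putting a VSI fully into promiscuous
+ * mode takes three separate AQ calls against the same SEID:
+ *
+ *	i40e_aq_set_vsi_unicast_promiscuous(hw, seid, true, NULL);
+ *	i40e_aq_set_vsi_multicast_promiscuous(hw, seid, true, NULL);
+ *	i40e_aq_set_vsi_broadcast(hw, seid, true, NULL);
+ */
+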
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_switch_seid *cmd =
+		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_vsi_parameters);
+
+	cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+				    sizeof(vsi_ctx->info), NULL);
+
+	if (status)
+		goto aq_get_vsi_params_exit;
+
+	vsi_ctx->seid = le16_to_cpu(resp->seid);
+	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_switch_seid *cmd =
+		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_update_vsi_parameters);
+	cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+				    sizeof(vsi_ctx->info), cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+				struct i40e_aqc_get_switch_config_resp *buf,
+				u16 buf_size, u16 *start_seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_switch_seid *scfg =
+		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_switch_config);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+	scfg->seid = cpu_to_le16(*start_seid);
+
+	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+	*start_seid = le16_to_cpu(scfg->seid);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+				u16 *fw_major_version, u16 *fw_minor_version,
+				u16 *api_major_version, u16 *api_minor_version,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_version *resp =
+		(struct i40e_aqc_get_version *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status) {
+		if (fw_major_version != NULL)
+			*fw_major_version = le16_to_cpu(resp->fw_major);
+		if (fw_minor_version != NULL)
+			*fw_minor_version = le16_to_cpu(resp->fw_minor);
+		if (api_major_version != NULL)
+			*api_major_version = le16_to_cpu(resp->api_major);
+		if (api_minor_version != NULL)
+			*api_minor_version = le16_to_cpu(resp->api_minor);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+				struct i40e_driver_version *dv,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_driver_version *cmd =
+		(struct i40e_aqc_driver_version *)&desc.params.raw;
+	i40e_status status;
+
+	if (dv == NULL)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+	cmd->driver_major_ver = dv->major_version;
+	cmd->driver_minor_ver = dv->minor_version;
+	cmd->driver_build_ver = dv->build_version;
+	cmd->driver_subbuild_ver = dv->subbuild_version;
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if link is up, false if link is down.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+bool i40e_get_link_status(struct i40e_hw *hw)
+{
+	i40e_status status = 0;
+	bool link_status = false;
+
+	if (hw->phy.get_link_info) {
+		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+		if (status)
+			goto i40e_get_link_status_exit;
+	}
+
+	link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+i40e_get_link_status_exit:
+	return link_status;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements.  If the uplink SEID is 0, this will be a floating VEB.
+ **/
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+				u16 downlink_seid, u8 enabled_tc,
+				bool default_port, u16 *veb_seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_veb *cmd =
+		(struct i40e_aqc_add_veb *)&desc.params.raw;
+	struct i40e_aqc_add_veb_completion *resp =
+		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+	i40e_status status;
+	u16 veb_flags = 0;
+
+	/* SEIDs need to either both be set or both be 0 for floating VEB */
+	if (!!uplink_seid != !!downlink_seid)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+	cmd->uplink_seid = cpu_to_le16(uplink_seid);
+	cmd->downlink_seid = cpu_to_le16(downlink_seid);
+	cmd->enable_tcs = enabled_tc;
+	if (!uplink_seid)
+		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+	if (default_port)
+		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+	else
+		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+	cmd->veb_flags = cpu_to_le16(veb_flags);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && veb_seid)
+		*veb_seid = le16_to_cpu(resp->veb_seid);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by the function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+				u16 veb_seid, u16 *switch_id,
+				bool *floating, u16 *statistic_index,
+				u16 *vebs_used, u16 *vebs_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+		(struct i40e_aqc_get_veb_parameters_completion *)
+		&desc.params.raw;
+	i40e_status status;
+
+	if (veb_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_veb_parameters);
+	cmd_resp->seid = cpu_to_le16(veb_seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (status)
+		goto get_veb_exit;
+
+	if (switch_id)
+		*switch_id = le16_to_cpu(cmd_resp->switch_id);
+	if (statistic_index)
+		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
+	if (vebs_used)
+		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
+	if (vebs_free)
+		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
+	if (floating) {
+		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+		if (flags & I40E_AQC_ADD_VEB_FLOATING)
+			*floating = true;
+		else
+			*floating = false;
+	}
+
+get_veb_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_add_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	i40e_status status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+	cmd->num_addresses = cpu_to_le16(count);
+	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+				    cmd_details);
+
+	return status;
+}
+
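+/* Usage sketch (illustrative; the element layout and the
+ * I40E_AQC_MACVLAN_ADD_PERFECT_MATCH flag are assumed from the
+ * definitions in i40e_adminq_cmd.h): add a single perfect-match
+ * filter for 'mac' on VLAN 'vlan_id':
+ *
+ *	struct i40e_aqc_add_macvlan_element_data elem = {};
+ *
+ *	memcpy(elem.mac_addr, mac, ETH_ALEN);
+ *	elem.vlan_tag = cpu_to_le16(vlan_id);
+ *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ *	status = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
+ */
+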
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	i40e_status status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+	cmd->num_addresses = cpu_to_le16(count);
+	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	i40e_status status;
+	u16 buf_size;
+
+	if (count == 0 || !v_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+	cmd->num_addresses = cpu_to_le16(count);
+	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	i40e_status status;
+	u16 buf_size;
+
+	if (count == 0 || !v_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+	cmd->num_addresses = cpu_to_le16(count);
+	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcode to put in the descriptor's cookie_high
+ * @v_retval: return value to put in the descriptor's cookie_low
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send a message to a VF through the admin queue
+ **/
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_pf_vf_message *cmd =
+		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+	cmd->id = cpu_to_le32(vfid);
+	desc.cookie_high = cpu_to_le32(v_opcode);
+	desc.cookie_low = cpu_to_le32(v_retval);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+	if (msglen) {
+		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+						I40E_AQ_FLAG_RD));
+		if (msglen > I40E_AQ_LARGE_BUF)
+			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = cpu_to_le16(msglen);
+	}
+	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+	return status;
+}
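+
+/* Illustrative sketch (not part of this driver): the PF could answer a
+ * VF request with a zero-length completion roughly like this, where
+ * "vf_id", "v_opcode" and "v_retval" are placeholders supplied by the
+ * caller:
+ *
+ *	status = i40e_aq_send_msg_to_vf(hw, vf_id, v_opcode, v_retval,
+ *					NULL, 0, NULL);
+ */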
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+				enum i40e_aq_hmc_profile profile,
+				u8 pe_vf_enabled_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aq_get_set_hmc_resource_profile *cmd =
+		(struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_hmc_resource_profile);
+
+	cmd->pm_profile = (u8)profile;
+	cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				enum i40e_aq_resource_access_type access,
+				u8 sdp_number, u64 *timeout,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_request_resource *cmd_resp =
+		(struct i40e_aqc_request_resource *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+	cmd_resp->resource_id = cpu_to_le16(resource);
+	cmd_resp->access_type = cpu_to_le16(access);
+	cmd_resp->resource_number = cpu_to_le32(sdp_number);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	/* The completion specifies the maximum time in ms that the driver
+	 * may hold the resource in the Timeout field.
+	 * If the resource is held by someone else, the command completes with
+	 * busy return value and the timeout field indicates the maximum time
+	 * the current owner of the resource has to free it.
+	 */
+	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+		*timeout = le32_to_cpu(cmd_resp->timeout);
+
+	return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				u8 sdp_number,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_request_resource *cmd =
+		(struct i40e_aqc_request_resource *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+	cmd->resource_id = cpu_to_le16(resource);
+	cmd->resource_number = cpu_to_le32(sdp_number);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
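+
+/* Illustrative sketch (not part of this driver): acquiring and releasing
+ * the NVM resource could look roughly like this; the resource and access
+ * ids are assumed from the shared type definitions:
+ *
+ *	u64 timeout = 0;
+ *	i40e_status status;
+ *
+ *	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ *					  I40E_RESOURCE_READ, 0, &timeout,
+ *					  NULL);
+ *	if (!status) {
+ *		... use the resource for at most "timeout" ms ...
+ *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ *	} else if (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) {
+ *		... retry after "timeout" ms, the current owner's limit ...
+ *	}
+ */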
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+	i40e_status status;
+
+	/* The highest byte in the offset must be zeroed. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_read_nvm_exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = cpu_to_le32(offset);
+	cmd->length = cpu_to_le16(length);
+
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+	return status;
+}
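+
+/* Illustrative sketch (not part of this driver): reading an NVM module
+ * in chunks, with last_command set only on the final piece; "module",
+ * "buf" and "total_len" are placeholders:
+ *
+ *	i40e_status status = 0;
+ *	u32 offset = 0;
+ *	u16 chunk;
+ *
+ *	while (!status && offset < total_len) {
+ *		chunk = min_t(u32, total_len - offset, 512);
+ *		status = i40e_aq_read_nvm(hw, module, offset, chunk,
+ *					  buf + offset,
+ *					  (offset + chunk >= total_len),
+ *					  NULL);
+ *		offset += chunk;
+ *	}
+ */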
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE	0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE	0x02
+#define I40E_DEV_FUNC_CAP_NPAR		0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC	0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC	0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1	0x12
+#define I40E_DEV_FUNC_CAP_VF		0x13
+#define I40E_DEV_FUNC_CAP_VMDQ		0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG	0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH	0x16
+#define I40E_DEV_FUNC_CAP_VSI		0x17
+#define I40E_DEV_FUNC_CAP_DCB		0x18
+#define I40E_DEV_FUNC_CAP_FCOE		0x21
+#define I40E_DEV_FUNC_CAP_RSS		0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES	0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES	0x42
+#define I40E_DEV_FUNC_CAP_MSIX		0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF	0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR	0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588	0x46
+#define I40E_DEV_FUNC_CAP_MFP_MODE_1	0xF1
+#define I40E_DEV_FUNC_CAP_CEM		0xF2
+#define I40E_DEV_FUNC_CAP_IWARP		0x51
+#define I40E_DEV_FUNC_CAP_LED		0x61
+#define I40E_DEV_FUNC_CAP_SDP		0x62
+#define I40E_DEV_FUNC_CAP_MDIO		0x63
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+				     u32 cap_count,
+				     enum i40e_admin_queue_opc list_type_opc)
+{
+	struct i40e_aqc_list_capabilities_element_resp *cap;
+	u32 number, logical_id, phys_id;
+	struct i40e_hw_capabilities *p;
+	u32 reg_val;
+	u32 i = 0;
+	u16 id;
+
+	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+		p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+		p = (struct i40e_hw_capabilities *)&hw->func_caps;
+	else
+		return;
+
+	for (i = 0; i < cap_count; i++, cap++) {
+		id = le16_to_cpu(cap->id);
+		number = le32_to_cpu(cap->number);
+		logical_id = le32_to_cpu(cap->logical_id);
+		phys_id = le32_to_cpu(cap->phys_id);
+
+		switch (id) {
+		case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+			p->switch_mode = number;
+			break;
+		case I40E_DEV_FUNC_CAP_MGMT_MODE:
+			p->management_mode = number;
+			break;
+		case I40E_DEV_FUNC_CAP_NPAR:
+			p->npar_enable = number;
+			break;
+		case I40E_DEV_FUNC_CAP_OS2BMC:
+			p->os2bmc = number;
+			break;
+		case I40E_DEV_FUNC_CAP_VALID_FUNC:
+			p->valid_functions = number;
+			break;
+		case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+			if (number == 1)
+				p->sr_iov_1_1 = true;
+			break;
+		case I40E_DEV_FUNC_CAP_VF:
+			p->num_vfs = number;
+			p->vf_base_id = logical_id;
+			break;
+		case I40E_DEV_FUNC_CAP_VMDQ:
+			if (number == 1)
+				p->vmdq = true;
+			break;
+		case I40E_DEV_FUNC_CAP_802_1_QBG:
+			if (number == 1)
+				p->evb_802_1_qbg = true;
+			break;
+		case I40E_DEV_FUNC_CAP_802_1_QBH:
+			if (number == 1)
+				p->evb_802_1_qbh = true;
+			break;
+		case I40E_DEV_FUNC_CAP_VSI:
+			p->num_vsis = number;
+			break;
+		case I40E_DEV_FUNC_CAP_DCB:
+			if (number == 1) {
+				p->dcb = true;
+				p->enabled_tcmap = logical_id;
+				p->maxtc = phys_id;
+			}
+			break;
+		case I40E_DEV_FUNC_CAP_FCOE:
+			if (number == 1)
+				p->fcoe = true;
+			break;
+		case I40E_DEV_FUNC_CAP_RSS:
+			p->rss = true;
+			reg_val = rd32(hw, I40E_PFQF_CTL_0);
+			if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
+				p->rss_table_size = number;
+			else
+				p->rss_table_size = 128;
+			p->rss_table_entry_width = logical_id;
+			break;
+		case I40E_DEV_FUNC_CAP_RX_QUEUES:
+			p->num_rx_qp = number;
+			p->base_queue = phys_id;
+			break;
+		case I40E_DEV_FUNC_CAP_TX_QUEUES:
+			p->num_tx_qp = number;
+			p->base_queue = phys_id;
+			break;
+		case I40E_DEV_FUNC_CAP_MSIX:
+			p->num_msix_vectors = number;
+			break;
+		case I40E_DEV_FUNC_CAP_MSIX_VF:
+			p->num_msix_vectors_vf = number;
+			break;
+		case I40E_DEV_FUNC_CAP_MFP_MODE_1:
+			if (number == 1)
+				p->mfp_mode_1 = true;
+			break;
+		case I40E_DEV_FUNC_CAP_CEM:
+			if (number == 1)
+				p->mgmt_cem = true;
+			break;
+		case I40E_DEV_FUNC_CAP_IWARP:
+			if (number == 1)
+				p->iwarp = true;
+			break;
+		case I40E_DEV_FUNC_CAP_LED:
+			if (phys_id < I40E_HW_CAP_MAX_GPIO)
+				p->led[phys_id] = true;
+			break;
+		case I40E_DEV_FUNC_CAP_SDP:
+			if (phys_id < I40E_HW_CAP_MAX_GPIO)
+				p->sdp[phys_id] = true;
+			break;
+		case I40E_DEV_FUNC_CAP_MDIO:
+			if (number == 1) {
+				p->mdio_port_num = phys_id;
+				p->mdio_port_mode = logical_id;
+			}
+			break;
+		case I40E_DEV_FUNC_CAP_IEEE_1588:
+			if (number == 1)
+				p->ieee_1588 = true;
+			break;
+		case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+			p->fd = true;
+			p->fd_filters_guaranteed = number;
+			p->fd_filters_best_effort = logical_id;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* additional HW specific goodies that might
+	 * someday be HW version specific
+	 */
+	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+				void *buff, u16 buff_size, u16 *data_size,
+				enum i40e_admin_queue_opc list_type_opc,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aqc_list_capabilites *cmd;
+	i40e_status status = 0;
+	struct i40e_aq_desc desc;
+
+	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+		status = I40E_ERR_PARAM;
+		goto exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	*data_size = le16_to_cpu(desc.datalen);
+
+	if (status)
+		goto exit;
+
+	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
+					 list_type_opc);
+
+exit:
+	return status;
+}
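+
+/* Illustrative sketch (not part of this driver): discovering function
+ * capabilities with the usual resize-and-retry pattern, using the needed
+ * size reported back in data_size when the AQ returns ENOMEM:
+ *
+ *	u16 data_size = 0, buff_size = 128;
+ *	void *buff;
+ *
+ *	do {
+ *		buff = kzalloc(buff_size, GFP_KERNEL);
+ *		if (!buff)
+ *			break;
+ *		status = i40e_aq_discover_capabilities(hw, buff, buff_size,
+ *				&data_size,
+ *				i40e_aqc_opc_list_func_capabilities, NULL);
+ *		kfree(buff);
+ *		buff_size = data_size;
+ *	} while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);
+ */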
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+				u8 mib_type, void *buff, u16 buff_size,
+				u16 *local_len, u16 *remote_len,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_get_mib *cmd =
+		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	struct i40e_aqc_lldp_get_mib *resp =
+		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	i40e_status status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+	/* Indirect Command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+
+	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+	desc.datalen = cpu_to_le16(buff_size);
+
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (local_len != NULL)
+			*local_len = le16_to_cpu(resp->local_len);
+		if (remote_len != NULL)
+			*remote_len = le16_to_cpu(resp->remote_len);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+				bool enable_update,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_update_mib *cmd =
+		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+	if (!enable_update)
+		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_stop *cmd =
+		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+	if (shutdown_agent)
+		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_start *cmd =
+		(struct i40e_aqc_lldp_start *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+	cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_switch_seid *cmd =
+		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	i40e_status status;
+
+	if (seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+	cmd->seid = cpu_to_le16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+				void *buff, u16 buff_size,
+				 enum i40e_admin_queue_opc opcode,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_tx_sched_ind *cmd =
+		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+	i40e_status status;
+	bool cmd_param_flag = false;
+
+	switch (opcode) {
+	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+	case i40e_aqc_opc_configure_vsi_tc_bw:
+	case i40e_aqc_opc_enable_switching_comp_ets:
+	case i40e_aqc_opc_modify_switching_comp_ets:
+	case i40e_aqc_opc_disable_switching_comp_ets:
+	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+	case i40e_aqc_opc_configure_switching_comp_bw_config:
+		cmd_param_flag = true;
+		break;
+	case i40e_aqc_opc_query_vsi_bw_config:
+	case i40e_aqc_opc_query_vsi_ets_sla_config:
+	case i40e_aqc_opc_query_switching_comp_ets_config:
+	case i40e_aqc_opc_query_port_ets_config:
+	case i40e_aqc_opc_query_switching_comp_bw_config:
+		cmd_param_flag = false;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (cmd_param_flag)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	desc.datalen = cpu_to_le16(buff_size);
+
+	cmd->vsi_seid = cpu_to_le16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_configure_vsi_tc_bw,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_vsi_bw_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_vsi_ets_sla_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				   i40e_aqc_opc_query_switching_comp_ets_config,
+				   cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_port_ets_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_port_ets_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_switching_comp_bw_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns 0 if the values passed are valid and within
+ * range else returns an error.
+ **/
+static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings)
+{
+	u32 fcoe_cntx_size, fcoe_filt_size;
+	u32 pe_cntx_size, pe_filt_size;
+	u32 fcoe_fmax, pe_fmax;
+	u32 val;
+
+	/* Validate FCoE settings passed */
+	switch (settings->fcoe_filt_num) {
+	case I40E_HASH_FILTER_SIZE_1K:
+	case I40E_HASH_FILTER_SIZE_2K:
+	case I40E_HASH_FILTER_SIZE_4K:
+	case I40E_HASH_FILTER_SIZE_8K:
+	case I40E_HASH_FILTER_SIZE_16K:
+	case I40E_HASH_FILTER_SIZE_32K:
+		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	switch (settings->fcoe_cntx_num) {
+	case I40E_DMA_CNTX_SIZE_512:
+	case I40E_DMA_CNTX_SIZE_1K:
+	case I40E_DMA_CNTX_SIZE_2K:
+	case I40E_DMA_CNTX_SIZE_4K:
+		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	/* Validate PE settings passed */
+	switch (settings->pe_filt_num) {
+	case I40E_HASH_FILTER_SIZE_1K:
+	case I40E_HASH_FILTER_SIZE_2K:
+	case I40E_HASH_FILTER_SIZE_4K:
+	case I40E_HASH_FILTER_SIZE_8K:
+	case I40E_HASH_FILTER_SIZE_16K:
+	case I40E_HASH_FILTER_SIZE_32K:
+	case I40E_HASH_FILTER_SIZE_64K:
+	case I40E_HASH_FILTER_SIZE_128K:
+	case I40E_HASH_FILTER_SIZE_256K:
+	case I40E_HASH_FILTER_SIZE_512K:
+	case I40E_HASH_FILTER_SIZE_1M:
+		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+		pe_filt_size <<= (u32)settings->pe_filt_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	switch (settings->pe_cntx_num) {
+	case I40E_DMA_CNTX_SIZE_512:
+	case I40E_DMA_CNTX_SIZE_1K:
+	case I40E_DMA_CNTX_SIZE_2K:
+	case I40E_DMA_CNTX_SIZE_4K:
+	case I40E_DMA_CNTX_SIZE_8K:
+	case I40E_DMA_CNTX_SIZE_16K:
+	case I40E_DMA_CNTX_SIZE_32K:
+	case I40E_DMA_CNTX_SIZE_64K:
+	case I40E_DMA_CNTX_SIZE_128K:
+	case I40E_DMA_CNTX_SIZE_256K:
+		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+		pe_cntx_size <<= (u32)settings->pe_cntx_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+	if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+		return I40E_ERR_INVALID_SIZE;
+
+	/* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
+	val = rd32(hw, I40E_GLHMC_PEXFMAX);
+	pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
+		   >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
+	if (pe_filt_size + pe_cntx_size > pe_fmax)
+		return I40E_ERR_INVALID_SIZE;
+
+	return 0;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings)
+{
+	i40e_status ret = 0;
+	u32 hash_lut_size = 0;
+	u32 val;
+
+	if (!settings)
+		return I40E_ERR_PARAM;
+
+	/* Validate the input settings */
+	ret = i40e_validate_filter_settings(hw, settings);
+	if (ret)
+		return ret;
+
+	/* Read the PF Queue Filter control register */
+	val = rd32(hw, I40E_PFQF_CTL_0);
+
+	/* Program required PE hash buckets for the PF */
+	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PEHSIZE_MASK;
+	/* Program required PE contexts for the PF */
+	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+	/* Program required FCoE hash buckets for the PF */
+	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+	val |= ((u32)settings->fcoe_filt_num <<
+			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+	/* Program required FCoE DDP contexts for the PF */
+	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+	val |= ((u32)settings->fcoe_cntx_num <<
+			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+	/* Program Hash LUT size for the PF */
+	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+		hash_lut_size = 1;
+	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+	if (settings->enable_fdir)
+		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+	if (settings->enable_ethtype)
+		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+	if (settings->enable_macvlan)
+		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+	wr32(hw, I40E_PFQF_CTL_0, val);
+
+	return 0;
+}
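+
+/* Illustrative sketch (not part of this driver): a PF could program its
+ * filter control once at init time roughly as below; the enum values are
+ * assumed from the shared type definitions:
+ *
+ *	struct i40e_filter_control_settings settings;
+ *
+ *	memset(&settings, 0, sizeof(settings));
+ *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ *	settings.enable_fdir = true;
+ *	settings.enable_ethtype = true;
+ *	settings.enable_macvlan = true;
+ *	ret = i40e_set_filter_control(hw, &settings);
+ */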
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
new file mode 100644
index 0000000..8dbd91f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#include "i40e.h"
+
+static struct dentry *i40e_dbg_root;
+
+/**
+ * i40e_dbg_find_vsi - searches for the vsi with the given seid
+ * @pf: the pf structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+{
+	int i;
+
+	if (seid < 0)
+		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+	else
+		for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+			if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+				return pf->vsi[i];
+
+	return NULL;
+}
+
+/**
+ * i40e_dbg_find_veb - searches for the veb with the given seid
+ * @pf: the pf structure to search for the veb
+ * @seid: seid of the veb it is searching for
+ **/
+static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
+{
+	int i;
+
+	if ((seid < I40E_BASE_VEB_SEID) ||
+	    (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
+		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+	else
+		for (i = 0; i < I40E_MAX_VEB; i++)
+			if (pf->veb[i] && pf->veb[i]->seid == seid)
+				return pf->veb[i];
+	return NULL;
+}
+
+/**************************************************************
+ * dump
+ * The dump entry in debugfs is for getting a data snapshot of
+ * the driver's current configuration and runtime details.
+ * When the filesystem entry is written, a snapshot is taken.
+ * When the entry is read, the most recent snapshot data is dumped.
+ **************************************************************/
+static char *i40e_dbg_dump_buf;
+static ssize_t i40e_dbg_dump_data_len;
+static ssize_t i40e_dbg_dump_buffer_len;
+
+/**
+ * i40e_dbg_dump_read - read the dump data
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
+				  size_t count, loff_t *ppos)
+{
+	int bytes_not_copied;
+	int len;
+
+	/* is *ppos bigger than the available data? */
+	if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
+		return 0;
+
+	/* be sure to not read beyond the end of available data */
+	len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
+
+	bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
+	if (bytes_not_copied)
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
+/**
+ * i40e_dbg_prep_dump_buf
+ * @pf: the pf we're working with
+ * @buflen: the desired buffer length
+ *
+ * Returns the new buffer length on success, 0 if the allocation failed
+ **/
+static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
+{
+	/* if not already big enough, prep for reallocation */
+	if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
+		kfree(i40e_dbg_dump_buf);
+		i40e_dbg_dump_buffer_len = 0;
+		i40e_dbg_dump_buf = NULL;
+	}
+
+	/* get a new buffer if needed */
+	if (!i40e_dbg_dump_buf) {
+		i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
+		if (i40e_dbg_dump_buf != NULL)
+			i40e_dbg_dump_buffer_len = buflen;
+	}
+
+	return i40e_dbg_dump_buffer_len;
+}
+
+/**
+ * i40e_dbg_dump_write - trigger a datadump snapshot
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ *
+ * A write of a SEID takes a new snapshot; writing 0 frees the buffer
+ **/
+static ssize_t i40e_dbg_dump_write(struct file *filp,
+				   const char __user *buffer,
+				   size_t count, loff_t *ppos)
+{
+	struct i40e_pf *pf = filp->private_data;
+	char dump_request_buf[16];
+	bool seid_found = false;
+	int bytes_not_copied;
+	long seid = -1;
+	int buflen = 0;
+	int i, ret;
+	int len;
+	u8 *p;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(dump_request_buf))
+		return -ENOSPC;
+
+	bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
+	if (bytes_not_copied)
+		return -EFAULT;
+	dump_request_buf[count] = '\0';
+
+	/* decode the SEID given to be dumped */
+	ret = kstrtol(dump_request_buf, 0, &seid);
+	if (ret < 0) {
+		dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
+			 dump_request_buf);
+	} else if (seid == 0) {
+		seid_found = true;
+
+		kfree(i40e_dbg_dump_buf);
+		i40e_dbg_dump_buffer_len = 0;
+		i40e_dbg_dump_data_len = 0;
+		i40e_dbg_dump_buf = NULL;
+		dev_info(&pf->pdev->dev, "debug buffer freed\n");
+
+	} else if (seid == pf->pf_seid || seid == 1) {
+		seid_found = true;
+
+		buflen = sizeof(struct i40e_pf);
+		buflen += (sizeof(struct i40e_aq_desc)
+		     * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
+
+		if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+			p = i40e_dbg_dump_buf;
+
+			len = sizeof(struct i40e_pf);
+			memcpy(p, pf, len);
+			p += len;
+
+			len = (sizeof(struct i40e_aq_desc)
+					* pf->hw.aq.num_asq_entries);
+			memcpy(p, pf->hw.aq.asq.desc, len);
+			p += len;
+
+			len = (sizeof(struct i40e_aq_desc)
+					* pf->hw.aq.num_arq_entries);
+			memcpy(p, pf->hw.aq.arq.desc, len);
+			p += len;
+
+			i40e_dbg_dump_data_len = buflen;
+			dev_info(&pf->pdev->dev,
+				 "PF seid %ld dumped %d bytes\n",
+				 seid, (int)i40e_dbg_dump_data_len);
+		}
+	} else if (seid >= I40E_BASE_VSI_SEID) {
+		struct i40e_vsi *vsi = NULL;
+		struct i40e_mac_filter *f;
+		int filter_count = 0;
+
+		mutex_lock(&pf->switch_mutex);
+		vsi = i40e_dbg_find_vsi(pf, seid);
+		if (!vsi) {
+			mutex_unlock(&pf->switch_mutex);
+			goto write_exit;
+		}
+
+		buflen = sizeof(struct i40e_vsi);
+		buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
+		buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
+		buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
+		buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
+		list_for_each_entry(f, &vsi->mac_filter_list, list)
+			filter_count++;
+		buflen += sizeof(struct i40e_mac_filter) * filter_count;
+
+		if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+			p = i40e_dbg_dump_buf;
+			seid_found = true;
+
+			len = sizeof(struct i40e_vsi);
+			memcpy(p, vsi, len);
+			p += len;
+
+			len = (sizeof(struct i40e_q_vector)
+				* vsi->num_q_vectors);
+			memcpy(p, vsi->q_vectors, len);
+			p += len;
+
+			len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
+			memcpy(p, vsi->tx_rings, len);
+			p += len;
+			memcpy(p, vsi->rx_rings, len);
+			p += len;
+
+			for (i = 0; i < vsi->num_queue_pairs; i++) {
+				len = sizeof(struct i40e_tx_buffer);
+				memcpy(p, vsi->tx_rings[i].tx_bi, len);
+				p += len;
+			}
+			for (i = 0; i < vsi->num_queue_pairs; i++) {
+				len = sizeof(struct i40e_rx_buffer);
+				memcpy(p, vsi->rx_rings[i].rx_bi, len);
+				p += len;
+			}
+
+			/* macvlan filter list */
+			len = sizeof(struct i40e_mac_filter);
+			list_for_each_entry(f, &vsi->mac_filter_list, list) {
+				memcpy(p, f, len);
+				p += len;
+			}
+
+			i40e_dbg_dump_data_len = buflen;
+			dev_info(&pf->pdev->dev,
+				 "VSI seid %ld dumped %d bytes\n",
+				 seid, (int)i40e_dbg_dump_data_len);
+		}
+		mutex_unlock(&pf->switch_mutex);
+	} else if (seid >= I40E_BASE_VEB_SEID) {
+		struct i40e_veb *veb = NULL;
+
+		mutex_lock(&pf->switch_mutex);
+		veb = i40e_dbg_find_veb(pf, seid);
+		if (!veb) {
+			mutex_unlock(&pf->switch_mutex);
+			goto write_exit;
+		}
+
+		buflen = sizeof(struct i40e_veb);
+		if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+			seid_found = true;
+			memcpy(i40e_dbg_dump_buf, veb, buflen);
+			i40e_dbg_dump_data_len = buflen;
+			dev_info(&pf->pdev->dev,
+				 "VEB seid %ld dumped %d bytes\n",
+				 seid, (int)i40e_dbg_dump_data_len);
+		}
+		mutex_unlock(&pf->switch_mutex);
+	}
+
+write_exit:
+	if (!seid_found)
+		dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
+
+	return count;
+}
+
+static const struct file_operations i40e_dbg_dump_fops = {
+	.owner = THIS_MODULE,
+	.open =  simple_open,
+	.read =  i40e_dbg_dump_read,
+	.write = i40e_dbg_dump_write,
+};
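+
+/* Typical interaction from user space (illustrative; the exact debugfs
+ * path is an assumption and depends on where the driver creates its
+ * directory):
+ *
+ *	# snapshot the PF (seid 1), then read the raw data back
+ *	echo 1 > /sys/kernel/debug/i40e/<pci-id>/dump
+ *	cat /sys/kernel/debug/i40e/<pci-id>/dump > pf.bin
+ *	# writing 0 frees the snapshot buffer
+ *	echo 0 > /sys/kernel/debug/i40e/<pci-id>/dump
+ */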
+
+/**************************************************************
+ * command
+ * The command entry in debugfs is for giving the driver commands
+ * to be executed - these may be for changing the internal switch
+ * setup, adding or removing filters, or other things.  Many of
+ * these will be useful for some forms of unit testing.
+ **************************************************************/
+static char i40e_dbg_command_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_command_read - read for command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+				     size_t count, loff_t *ppos)
+{
+	struct i40e_pf *pf = filp->private_data;
+	int bytes_not_copied;
+	int buf_size = 256;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+	if (count < buf_size)
+		return -ENOSPC;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOSPC;
+
+	len = snprintf(buf, buf_size, "%s: %s\n",
+		       pf->vsi[pf->lan_vsi]->netdev->name,
+		       i40e_dbg_command_buf);
+
+	bytes_not_copied = copy_to_user(buffer, buf, len);
+	kfree(buf);
+
+	if (bytes_not_copied)
+		return -EFAULT;
+
+	*ppos = len;
+	return len;
+}
+
+/**
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+{
+	struct rtnl_link_stats64 *nstat;
+	struct i40e_mac_filter *f;
+	struct i40e_vsi *vsi;
+	int i;
+
+	vsi = i40e_dbg_find_vsi(pf, seid);
+	if (!vsi) {
+		dev_info(&pf->pdev->dev,
+			 "dump %d: seid not found\n", seid);
+		return;
+	}
+	dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
+	if (vsi->netdev)
+		dev_info(&pf->pdev->dev,
+			 "    netdev: name = %s\n",
+			 vsi->netdev->name);
+	if (vsi->active_vlans)
+		dev_info(&pf->pdev->dev,
+			 "    vlgrp: & = %p\n", vsi->active_vlans);
+	dev_info(&pf->pdev->dev,
+		 "    netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
+		 vsi->netdev_registered,
+		 vsi->current_netdev_flags, vsi->state, vsi->flags);
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		dev_info(&pf->pdev->dev,
+			 "    mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+			 f->macaddr, f->vlan, f->is_netdev, f->is_vf,
+			 f->counter);
+	}
+	nstat = i40e_get_vsi_stats_struct(vsi);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+		 (long unsigned int)nstat->rx_packets,
+		 (long unsigned int)nstat->rx_bytes,
+		 (long unsigned int)nstat->rx_errors,
+		 (long unsigned int)nstat->rx_dropped);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+		 (long unsigned int)nstat->tx_packets,
+		 (long unsigned int)nstat->tx_bytes,
+		 (long unsigned int)nstat->tx_errors,
+		 (long unsigned int)nstat->tx_dropped);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: multicast = %lu, collisions = %lu\n",
+		 (long unsigned int)nstat->multicast,
+		 (long unsigned int)nstat->collisions);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+		 (long unsigned int)nstat->rx_length_errors,
+		 (long unsigned int)nstat->rx_over_errors,
+		 (long unsigned int)nstat->rx_crc_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+		 (long unsigned int)nstat->rx_frame_errors,
+		 (long unsigned int)nstat->rx_fifo_errors,
+		 (long unsigned int)nstat->rx_missed_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+		 (long unsigned int)nstat->tx_aborted_errors,
+		 (long unsigned int)nstat->tx_carrier_errors,
+		 (long unsigned int)nstat->tx_fifo_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+		 (long unsigned int)nstat->tx_heartbeat_errors,
+		 (long unsigned int)nstat->tx_window_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
+		 (long unsigned int)nstat->rx_compressed,
+		 (long unsigned int)nstat->tx_compressed);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.rx_packets,
+		 (long unsigned int)vsi->net_stats_offsets.rx_bytes,
+		 (long unsigned int)vsi->net_stats_offsets.rx_errors,
+		 (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.tx_packets,
+		 (long unsigned int)vsi->net_stats_offsets.tx_bytes,
+		 (long unsigned int)vsi->net_stats_offsets.tx_errors,
+		 (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: multicast = %lu, collisions = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.multicast,
+		 (long unsigned int)vsi->net_stats_offsets.collisions);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
+		 (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
+		 (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
+		 (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
+		 (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
+		 (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
+		 (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
+		 (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+	dev_info(&pf->pdev->dev,
+		 "    net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
+		 (long unsigned int)vsi->net_stats_offsets.rx_compressed,
+		 (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+	dev_info(&pf->pdev->dev,
+		 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+		 vsi->tx_restart, vsi->tx_busy,
+		 vsi->rx_buf_failed, vsi->rx_page_failed);
+	if (vsi->rx_rings) {
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: desc = %p\n",
+				 i, vsi->rx_rings[i].desc);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+				 i, vsi->rx_rings[i].dev,
+				 vsi->rx_rings[i].netdev,
+				 vsi->rx_rings[i].rx_bi);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+				 i, vsi->rx_rings[i].state,
+				 vsi->rx_rings[i].queue_index,
+				 vsi->rx_rings[i].reg_idx);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+				 i, vsi->rx_rings[i].rx_hdr_len,
+				 vsi->rx_rings[i].rx_buf_len,
+				 vsi->rx_rings[i].dtype);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+				 i, vsi->rx_rings[i].hsplit,
+				 vsi->rx_rings[i].next_to_use,
+				 vsi->rx_rings[i].next_to_clean,
+				 vsi->rx_rings[i].ring_active);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+				 i, vsi->rx_rings[i].rx_stats.packets,
+				 vsi->rx_rings[i].rx_stats.bytes,
+				 vsi->rx_rings[i].rx_stats.non_eop_descs);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+				 i,
+				 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
+				vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+				 i, vsi->rx_rings[i].size,
+				 (long unsigned int)vsi->rx_rings[i].dma);
+			dev_info(&pf->pdev->dev,
+				 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
+				 i, vsi->rx_rings[i].vsi,
+				 vsi->rx_rings[i].q_vector);
+		}
+	}
+	if (vsi->tx_rings) {
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: desc = %p\n",
+				 i, vsi->tx_rings[i].desc);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+				 i, vsi->tx_rings[i].dev,
+				 vsi->tx_rings[i].netdev,
+				 vsi->tx_rings[i].tx_bi);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+				 i, vsi->tx_rings[i].state,
+				 vsi->tx_rings[i].queue_index,
+				 vsi->tx_rings[i].reg_idx);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: dtype = %d\n",
+				 i, vsi->tx_rings[i].dtype);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+				 i, vsi->tx_rings[i].hsplit,
+				 vsi->tx_rings[i].next_to_use,
+				 vsi->tx_rings[i].next_to_clean,
+				 vsi->tx_rings[i].ring_active);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+				 i, vsi->tx_rings[i].tx_stats.packets,
+				 vsi->tx_rings[i].tx_stats.bytes,
+				 vsi->tx_rings[i].tx_stats.restart_queue);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
+				 i,
+				 vsi->tx_rings[i].tx_stats.tx_busy,
+				 vsi->tx_rings[i].tx_stats.completed,
+				 vsi->tx_rings[i].tx_stats.tx_done_old);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+				 i, vsi->tx_rings[i].size,
+				 (long unsigned int)vsi->tx_rings[i].dma);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
+				 i, vsi->tx_rings[i].vsi,
+				 vsi->tx_rings[i].q_vector);
+			dev_info(&pf->pdev->dev,
+				 "    tx_rings[%i]: DCB tc = %d\n",
+				 i, vsi->tx_rings[i].dcb_tc);
+		}
+	}
+	dev_info(&pf->pdev->dev,
+		 "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
+		 vsi->work_limit, vsi->rx_itr_setting,
+		 ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
+		 vsi->tx_itr_setting,
+		 ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+	dev_info(&pf->pdev->dev,
+		 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
+		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+	if (vsi->q_vectors) {
+		for (i = 0; i < vsi->num_q_vectors; i++) {
+			dev_info(&pf->pdev->dev,
+				 "    q_vectors[%i]: base index = %ld\n",
+				 i, ((long int)*vsi->q_vectors[i].rx.ring-
+					(long int)*vsi->q_vectors[0].rx.ring)/
+					sizeof(struct i40e_ring));
+		}
+	}
+	dev_info(&pf->pdev->dev,
+		 "    num_q_vectors = %i, base_vector = %i\n",
+		 vsi->num_q_vectors, vsi->base_vector);
+	dev_info(&pf->pdev->dev,
+		 "    seid = %d, id = %d, uplink_seid = %d\n",
+		 vsi->seid, vsi->id, vsi->uplink_seid);
+	dev_info(&pf->pdev->dev,
+		 "    base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
+		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+	dev_info(&pf->pdev->dev, "    type = %i\n", vsi->type);
+	dev_info(&pf->pdev->dev,
+		 "    info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
+		 vsi->info.valid_sections, vsi->info.switch_id);
+	dev_info(&pf->pdev->dev,
+		 "    info: sw_reserved[] = 0x%02x 0x%02x\n",
+		 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
+	dev_info(&pf->pdev->dev,
+		 "    info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
+		 vsi->info.sec_flags, vsi->info.sec_reserved);
+	dev_info(&pf->pdev->dev,
+		 "    info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
+		 vsi->info.pvid, vsi->info.fcoe_pvid,
+		 vsi->info.port_vlan_flags);
+	dev_info(&pf->pdev->dev,
+		 "    info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
+		 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
+		 vsi->info.pvlan_reserved[2]);
+	dev_info(&pf->pdev->dev,
+		 "    info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
+		 vsi->info.ingress_table, vsi->info.egress_table);
+	dev_info(&pf->pdev->dev,
+		 "    info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
+		 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
+		 vsi->info.cas_pv_reserved);
+	dev_info(&pf->pdev->dev,
+		 "    info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+		 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
+		 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
+		 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
+		 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
+	dev_info(&pf->pdev->dev,
+		 "    info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+		 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
+		 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
+		 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
+		 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
+	dev_info(&pf->pdev->dev,
+		 "    info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+		 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
+		 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
+		 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
+		 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
+	dev_info(&pf->pdev->dev,
+		 "    info: queueing_opt_flags = 0x%02x  queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
+		 vsi->info.queueing_opt_flags,
+		 vsi->info.queueing_opt_reserved[0],
+		 vsi->info.queueing_opt_reserved[1],
+		 vsi->info.queueing_opt_reserved[2]);
+	dev_info(&pf->pdev->dev,
+		 "    info: up_enable_bits = 0x%02x\n",
+		 vsi->info.up_enable_bits);
+	dev_info(&pf->pdev->dev,
+		 "    info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
+		 vsi->info.sched_reserved, vsi->info.outer_up_table);
+	dev_info(&pf->pdev->dev,
+		 "    info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
+		 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
+		 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
+		 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
+		 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
+	dev_info(&pf->pdev->dev,
+		 "    info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+		 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
+		 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
+		 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
+		 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
+	dev_info(&pf->pdev->dev,
+		 "    info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
+		 vsi->info.stat_counter_idx, vsi->info.sched_id);
+	dev_info(&pf->pdev->dev,
+		 "    info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+		 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
+		 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
+		 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
+		 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
+		 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
+		 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
+	if (vsi->back)
+		dev_info(&pf->pdev->dev, "    pf = %p\n", vsi->back);
+	dev_info(&pf->pdev->dev, "    idx = %d\n", vsi->idx);
+	dev_info(&pf->pdev->dev,
+		 "    tc_config: numtc = %d, enabled_tc = 0x%x\n",
+		 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev,
+			 "    tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
+			 i, vsi->tc_config.tc_info[i].qoffset,
+			 vsi->tc_config.tc_info[i].qcount,
+			 vsi->tc_config.tc_info[i].netdev_tc);
+	}
+	dev_info(&pf->pdev->dev,
+		 "    bw: bw_limit = %d, bw_max_quanta = %d\n",
+		 vsi->bw_limit, vsi->bw_max_quanta);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev,
+			 "    bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
+			 i, vsi->bw_ets_share_credits[i],
+			 vsi->bw_ets_limit_credits[i],
+			 vsi->bw_ets_max_quanta[i]);
+	}
+}
+
+/**
+ * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
+{
+	struct i40e_adminq_ring *ring;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	/* first the send (command) ring, then the receive (event) ring */
+	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
+	ring = &(hw->aq.asq);
+	for (i = 0; i < ring->count; i++) {
+		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+		dev_info(&pf->pdev->dev,
+			 "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+			 i, d->flags, d->opcode, d->datalen, d->retval,
+			 d->cookie_high, d->cookie_low);
+		dev_info(&pf->pdev->dev,
+			 "            %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+			 d->params.raw[0], d->params.raw[1], d->params.raw[2],
+			 d->params.raw[3], d->params.raw[4], d->params.raw[5],
+			 d->params.raw[6], d->params.raw[7], d->params.raw[8],
+			 d->params.raw[9], d->params.raw[10], d->params.raw[11],
+			 d->params.raw[12], d->params.raw[13],
+			 d->params.raw[14], d->params.raw[15]);
+	}
+
+	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
+	ring = &(hw->aq.arq);
+	for (i = 0; i < ring->count; i++) {
+		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+		dev_info(&pf->pdev->dev,
+			 "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+			 i, d->flags, d->opcode, d->datalen, d->retval,
+			 d->cookie_high, d->cookie_low);
+		dev_info(&pf->pdev->dev,
+			 "            %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+			 d->params.raw[0], d->params.raw[1], d->params.raw[2],
+			 d->params.raw[3], d->params.raw[4], d->params.raw[5],
+			 d->params.raw[6], d->params.raw[7], d->params.raw[8],
+			 d->params.raw[9], d->params.raw[10], d->params.raw[11],
+			 d->params.raw[12], d->params.raw[13],
+			 d->params.raw[14], d->params.raw[15]);
+	}
+}
+
+/**
+ * i40e_dbg_dump_desc - handles dump desc write into command datum
+ * @cnt: number of arguments that the user supplied
+ * @vsi_seid: vsi id entered by user
+ * @ring_id: ring id entered by user
+ * @desc_n: descriptor number entered by user
+ * @pf: the i40e_pf created in command write
+ * @is_rx_ring: true if rx, false if tx
+ **/
+static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+			       struct i40e_pf *pf, bool is_rx_ring)
+{
+	union i40e_rx_desc *ds;
+	struct i40e_ring ring;
+	struct i40e_vsi *vsi;
+	int i;
+
+	vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+	if (!vsi) {
+		dev_info(&pf->pdev->dev,
+			 "vsi %d not found\n", vsi_seid);
+		if (is_rx_ring)
+			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+		else
+			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+		return;
+	}
+	if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
+		dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
+		if (is_rx_ring)
+			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+		else
+			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+		return;
+	}
+	if (is_rx_ring)
+		ring = vsi->rx_rings[ring_id];
+	else
+		ring = vsi->tx_rings[ring_id];
+	if (cnt == 2) {
+		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
+			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+		for (i = 0; i < ring.count; i++) {
+			if (is_rx_ring)
+				ds = I40E_RX_DESC(&ring, i);
+			else
+				ds = (union i40e_rx_desc *)
+					I40E_TX_DESC(&ring, i);
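+			/* tx descriptors and 16-byte rx descriptors carry
+			 * only two quadwords worth of data to print
+			 */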
+			if ((sizeof(union i40e_rx_desc) ==
+			    sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+				dev_info(&pf->pdev->dev,
+					 "   d[%03i] = 0x%016llx 0x%016llx\n", i,
+					 ds->read.pkt_addr, ds->read.hdr_addr);
+			else
+				dev_info(&pf->pdev->dev,
+					 "   d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+					 i, ds->read.pkt_addr,
+					 ds->read.hdr_addr,
+					 ds->read.rsvd1, ds->read.rsvd2);
+		}
+	} else if (cnt == 3) {
+		if (desc_n >= ring.count || desc_n < 0) {
+			dev_info(&pf->pdev->dev,
+				 "descriptor %d not found\n", desc_n);
+			return;
+		}
+		if (is_rx_ring)
+			ds = I40E_RX_DESC(&ring, desc_n);
+		else
+			ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
+		if ((sizeof(union i40e_rx_desc) ==
+		    sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+			dev_info(&pf->pdev->dev,
+				 "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+				 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
+				 desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
+		else
+			dev_info(&pf->pdev->dev,
+				 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+				 vsi_seid, ring_id,
+				 desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
+				 ds->read.rsvd1, ds->read.rsvd2);
+	} else {
+		if (is_rx_ring)
+			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+		else
+			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+	}
+}
+
+/**
+ * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
+{
+	int i;
+
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		if (pf->vsi[i])
+			dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
+				 i, pf->vsi[i]->seid);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump of eth stats into command datum
+ * @pf: the i40e_pf created in command write
+ * @estats: the eth stats structure to be dumped
+ **/
+static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
+				    struct i40e_eth_stats *estats)
+{
+	dev_info(&pf->pdev->dev, "  ethstats:\n");
+	dev_info(&pf->pdev->dev,
+		 "    rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
+		estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
+	dev_info(&pf->pdev->dev,
+		 "    rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
+		 estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+	dev_info(&pf->pdev->dev,
+		 "    rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+		 estats->rx_missed, estats->rx_unknown_protocol,
+		 estats->tx_bytes);
+	dev_info(&pf->pdev->dev,
+		 "    tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
+		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
+	dev_info(&pf->pdev->dev,
+		 "    tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
+		 estats->tx_discards, estats->tx_errors);
+}
+
+/**
+ * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @stats: the stats structure to be dumped
+ **/
+static void i40e_dbg_dump_stats(struct i40e_pf *pf,
+				struct i40e_hw_port_stats *stats)
+{
+	int i;
+
+	dev_info(&pf->pdev->dev, "  stats:\n");
+	dev_info(&pf->pdev->dev,
+		 "    crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
+		 stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
+	dev_info(&pf->pdev->dev,
+		 "    mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
+		 stats->mac_local_faults, stats->mac_remote_faults,
+		 stats->rx_length_errors);
+	dev_info(&pf->pdev->dev,
+		 "    link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
+		 stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
+	dev_info(&pf->pdev->dev,
+		 "    link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
+		 stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
+	dev_info(&pf->pdev->dev,
+		 "    rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
+		 stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
+	dev_info(&pf->pdev->dev,
+		 "    rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
+		 stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
+	dev_info(&pf->pdev->dev,
+		 "    rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
+		 stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
+	dev_info(&pf->pdev->dev,
+		 "    tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
+		 stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
+	dev_info(&pf->pdev->dev,
+		 "    tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
+		 stats->tx_size_1023, stats->tx_size_big,
+		 stats->mac_short_packet_dropped);
+	for (i = 0; i < 8; i += 4) {
+		dev_info(&pf->pdev->dev,
+			 "    priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+			 i, stats->priority_xon_rx[i],
+			 i+1, stats->priority_xon_rx[i+1],
+			 i+2, stats->priority_xon_rx[i+2],
+			 i+3, stats->priority_xon_rx[i+3]);
+	}
+	for (i = 0; i < 8; i += 4) {
+		dev_info(&pf->pdev->dev,
+			 "    priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+			 i, stats->priority_xoff_rx[i],
+			 i+1, stats->priority_xoff_rx[i+1],
+			 i+2, stats->priority_xoff_rx[i+2],
+			 i+3, stats->priority_xoff_rx[i+3]);
+	}
+	for (i = 0; i < 8; i += 4) {
+		dev_info(&pf->pdev->dev,
+			 "    priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+			 i, stats->priority_xon_tx[i],
+			 i+1, stats->priority_xon_tx[i+1],
+			 i+2, stats->priority_xon_tx[i+2],
+			 i+3, stats->priority_xon_tx[i+3]);
+	}
+	for (i = 0; i < 8; i += 4) {
+		dev_info(&pf->pdev->dev,
+			 "    priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+			 i, stats->priority_xoff_tx[i],
+			 i+1, stats->priority_xoff_tx[i+1],
+			 i+2, stats->priority_xoff_tx[i+2],
+			 i+3, stats->priority_xoff_tx[i+3]);
+	}
+	for (i = 0; i < 8; i += 4) {
+		dev_info(&pf->pdev->dev,
+			 "    priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+			 i, stats->priority_xon_2_xoff[i],
+			 i+1, stats->priority_xon_2_xoff[i+1],
+			 i+2, stats->priority_xon_2_xoff[i+2],
+			 i+3, stats->priority_xon_2_xoff[i+3]);
+	}
+
+	i40e_dbg_dump_eth_stats(pf, &stats->eth);
+}
+
+/**
+ * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
+{
+	struct i40e_veb *veb;
+
+	if ((seid < I40E_BASE_VEB_SEID) ||
+	    (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
+		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+		return;
+	}
+
+	veb = i40e_dbg_find_veb(pf, seid);
+	if (!veb) {
+		dev_info(&pf->pdev->dev,
+			 "%d: can't find veb\n", seid);
+		return;
+	}
+	dev_info(&pf->pdev->dev,
+		 "veb idx=%d,%d stats_ic=%d  seid=%d uplink=%d\n",
+		 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+		 veb->uplink_seid);
+	i40e_dbg_dump_eth_stats(pf, &veb->stats);
+}
+
+/**
+ * i40e_dbg_dump_veb_all - dumps stats for all known VEBs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
+{
+	struct i40e_veb *veb;
+	int i;
+
+	for (i = 0; i < I40E_MAX_VEB; i++) {
+		veb = pf->veb[i];
+		if (veb)
+			i40e_dbg_dump_veb_seid(pf, veb->seid);
+	}
+}
+
+#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
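+
+/* Usage sketch (illustrative; the debugfs mount point varies by system,
+ * and all output lands in the kernel log via dev_info()):
+ *
+ *   echo "dump vsi <seid>" > /sys/kernel/debug/i40e/<pci_id>/command
+ *   echo "dump desc aq"    > /sys/kernel/debug/i40e/<pci_id>/command
+ *   echo "read <reg>"      > /sys/kernel/debug/i40e/<pci_id>/command
+ */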
+/**
+ * i40e_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_write(struct file *filp,
+				      const char __user *buffer,
+				      size_t count, loff_t *ppos)
+{
+	struct i40e_pf *pf = filp->private_data;
+	int bytes_not_copied;
+	struct i40e_vsi *vsi;
+	u8 *print_buf_start;
+	u8 *print_buf;
+	char *cmd_buf;
+	int vsi_seid;
+	int veb_seid;
+	int cnt;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return count;
+	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+	if (bytes_not_copied < 0) {
+		kfree(cmd_buf);
+		return bytes_not_copied;
+	}
+	if (bytes_not_copied > 0)
+		count -= bytes_not_copied;
+	cmd_buf[count] = '\0';
+
+	print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
+	if (!print_buf_start)
+		goto command_write_done;
+	print_buf = print_buf_start;
+
+	if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+		vsi_seid = -1;
+		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+		if (cnt == 0) {
+			/* default to PF VSI */
+			vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+		} else if (vsi_seid < 0) {
+			dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
+				 vsi_seid);
+			goto command_write_done;
+		}
+
+		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
+		if (vsi)
+			dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
+				 vsi->seid, vsi->uplink_seid);
+		else
+			dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
+
+	} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
+		sscanf(&cmd_buf[7], "%i", &vsi_seid);
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
+				 vsi_seid);
+			goto command_write_done;
+		}
+
+		dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
+		i40e_vsi_release(vsi);
+
+	} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
+		struct i40e_veb *veb;
+		int uplink_seid, i;
+
+		cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "add relay: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		} else if (uplink_seid < 0) {
+			dev_info(&pf->pdev->dev,
+				 "add relay %d: bad uplink seid\n",
+				 uplink_seid);
+			goto command_write_done;
+		}
+
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "add relay: vsi VSI %d not found\n", vsi_seid);
+			goto command_write_done;
+		}
+
+		for (i = 0; i < I40E_MAX_VEB; i++)
+			if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
+				break;
+		if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
+		    uplink_seid != pf->mac_seid) {
+			dev_info(&pf->pdev->dev,
+				 "add relay: relay uplink %d not found\n",
+				 uplink_seid);
+			goto command_write_done;
+		}
+
+		veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
+				     vsi->tc_config.enabled_tc);
+		if (veb)
+			dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
+		else
+			dev_info(&pf->pdev->dev, "add relay failed\n");
+
+	} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+		int i;
+		cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev,
+				 "del relay: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		} else if (veb_seid < 0) {
+			dev_info(&pf->pdev->dev,
+				 "del relay %d: bad relay seid\n", veb_seid);
+			goto command_write_done;
+		}
+
+		/* find the veb */
+		for (i = 0; i < I40E_MAX_VEB; i++)
+			if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+				break;
+		if (i >= I40E_MAX_VEB) {
+			dev_info(&pf->pdev->dev,
+				 "del relay: relay %d not found\n", veb_seid);
+			goto command_write_done;
+		}
+
+		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+		i40e_veb_release(pf->veb[i]);
+
+	} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
+		u8 ma[6];
+		int vlan = 0;
+		struct i40e_mac_filter *f;
+		int ret;
+
+		cnt = sscanf(&cmd_buf[11],
+			     "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+			     &vsi_seid,
+			     &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+			     &vlan);
+		if (cnt == 7) {
+			vlan = 0;
+		} else if (cnt != 8) {
+			dev_info(&pf->pdev->dev,
+				 "add macaddr: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "add macaddr: VSI %d not found\n", vsi_seid);
+			goto command_write_done;
+		}
+
+		f = i40e_add_filter(vsi, ma, vlan, false, false);
+		ret = i40e_sync_vsi_filters(vsi);
+		if (f && !ret)
+			dev_info(&pf->pdev->dev,
+				 "add macaddr: %pM vlan=%d added to VSI %d\n",
+				 ma, vlan, vsi_seid);
+		else
+			dev_info(&pf->pdev->dev,
+				 "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
+				 ma, vlan, vsi_seid, f, ret);
+
+	} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
+		u8 ma[6];
+		int vlan = 0;
+		int ret;
+
+		cnt = sscanf(&cmd_buf[11],
+			     "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+			     &vsi_seid,
+			     &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+			     &vlan);
+		if (cnt == 7) {
+			vlan = 0;
+		} else if (cnt != 8) {
+			dev_info(&pf->pdev->dev,
+				 "del macaddr: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "del macaddr: VSI %d not found\n", vsi_seid);
+			goto command_write_done;
+		}
+
+		i40e_del_filter(vsi, ma, vlan, false, false);
+		ret = i40e_sync_vsi_filters(vsi);
+		if (!ret)
+			dev_info(&pf->pdev->dev,
+				 "del macaddr: %pM vlan=%d removed from VSI %d\n",
+				 ma, vlan, vsi_seid);
+		else
+			dev_info(&pf->pdev->dev,
+				 "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
+				 ma, vlan, vsi_seid, ret);
+
+	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+		int v;
+		u16 vid;
+		i40e_status ret;
+
+		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "add pvid: bad command string, cnt=%d\n", cnt);
+			goto command_write_done;
+		}
+
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
+				 vsi_seid);
+			goto command_write_done;
+		}
+
+		vid = (unsigned)v;
+		ret = i40e_vsi_add_pvid(vsi, vid);
+		if (!ret)
+			dev_info(&pf->pdev->dev,
+				 "add pvid: %d added to VSI %d\n",
+				 vid, vsi_seid);
+		else
+			dev_info(&pf->pdev->dev,
+				 "add pvid: %d to VSI %d failed, ret=%d\n",
+				 vid, vsi_seid, ret);
+
+	} else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
+
+		cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev,
+				 "del pvid: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "del pvid: VSI %d not found\n", vsi_seid);
+			goto command_write_done;
+		}
+
+		i40e_vsi_remove_pvid(vsi);
+		dev_info(&pf->pdev->dev,
+			 "del pvid: removed from VSI %d\n", vsi_seid);
+
+	} else if (strncmp(cmd_buf, "dump", 4) == 0) {
+		if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
+			i40e_fetch_switch_configuration(pf, true);
+		} else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
+			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+			if (cnt > 0)
+				i40e_dbg_dump_vsi_seid(pf, vsi_seid);
+			else
+				i40e_dbg_dump_vsi_no_seid(pf);
+		} else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
+			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+			if (cnt > 0)
+				i40e_dbg_dump_veb_seid(pf, vsi_seid);
+			else
+				i40e_dbg_dump_veb_all(pf);
+		} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
+			int ring_id, desc_n;
+			if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
+				cnt = sscanf(&cmd_buf[12], "%i %i %i",
+					     &vsi_seid, &ring_id, &desc_n);
+				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+						   desc_n, pf, true);
+			} else if (strncmp(&cmd_buf[10], "tx", 2)
+					== 0) {
+				cnt = sscanf(&cmd_buf[12], "%i %i %i",
+					     &vsi_seid, &ring_id, &desc_n);
+				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+						   desc_n, pf, false);
+			} else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
+				i40e_dbg_dump_aq_desc(pf);
+			} else {
+				dev_info(&pf->pdev->dev,
+					 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+				dev_info(&pf->pdev->dev,
+					 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+				dev_info(&pf->pdev->dev, "dump desc aq\n");
+			}
+		} else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
+			dev_info(&pf->pdev->dev, "pf stats:\n");
+			i40e_dbg_dump_stats(pf, &pf->stats);
+			dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
+			i40e_dbg_dump_stats(pf, &pf->stats_offsets);
+		} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
+			dev_info(&pf->pdev->dev,
+				 "core reset count: %d\n", pf->corer_count);
+			dev_info(&pf->pdev->dev,
+				 "global reset count: %d\n", pf->globr_count);
+			dev_info(&pf->pdev->dev,
+				 "emp reset count: %d\n", pf->empr_count);
+			dev_info(&pf->pdev->dev,
+				 "pf reset count: %d\n", pf->pfr_count);
+		} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
+			struct i40e_aqc_query_port_ets_config_resp *bw_data;
+			struct i40e_dcbx_config *cfg =
+						&pf->hw.local_dcbx_config;
+			struct i40e_dcbx_config *r_cfg =
+						&pf->hw.remote_dcbx_config;
+			int i, ret;
+
+			bw_data = kzalloc(sizeof(
+				    struct i40e_aqc_query_port_ets_config_resp),
+					  GFP_KERNEL);
+			if (!bw_data) {
+				ret = -ENOMEM;
+				goto command_write_done;
+			}
+
+			ret = i40e_aq_query_port_ets_config(&pf->hw,
+							    pf->mac_seid,
+							    bw_data, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Query Port ETS Config AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				kfree(bw_data);
+				bw_data = NULL;
+				goto command_write_done;
+			}
+			dev_info(&pf->pdev->dev,
+				 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
+				 bw_data->tc_valid_bits,
+				 bw_data->tc_strict_priority_bits,
+				 le16_to_cpu(bw_data->tc_bw_max[0]),
+				 le16_to_cpu(bw_data->tc_bw_max[1]));
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
+					 bw_data->tc_bw_share_credits[i],
+					 le16_to_cpu(bw_data->tc_bw_limits[i]));
+			}
+
+			kfree(bw_data);
+			bw_data = NULL;
+
+			dev_info(&pf->pdev->dev,
+				 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+				 cfg->etscfg.willing, cfg->etscfg.cbs,
+				 cfg->etscfg.maxtcs);
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+					 i, cfg->etscfg.prioritytable[i],
+					 cfg->etscfg.tcbwtable[i],
+					 cfg->etscfg.tsatable[i]);
+			}
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+					 i, cfg->etsrec.prioritytable[i],
+					 cfg->etsrec.tcbwtable[i],
+					 cfg->etsrec.tsatable[i]);
+			}
+			dev_info(&pf->pdev->dev,
+				 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+				 cfg->pfc.willing, cfg->pfc.mbc,
+				 cfg->pfc.pfccap, cfg->pfc.pfcenable);
+			dev_info(&pf->pdev->dev,
+				 "port app_table: num_apps=%d\n", cfg->numapps);
+			for (i = 0; i < cfg->numapps; i++) {
+				dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+					 i, cfg->app[i].priority,
+					 cfg->app[i].selector,
+					 cfg->app[i].protocolid);
+			}
+			/* Peer TLV DCBX data */
+			dev_info(&pf->pdev->dev,
+				 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+				 r_cfg->etscfg.willing,
+				 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+					 i, r_cfg->etscfg.prioritytable[i],
+					 r_cfg->etscfg.tcbwtable[i],
+					 r_cfg->etscfg.tsatable[i]);
+			}
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+					 i, r_cfg->etsrec.prioritytable[i],
+					 r_cfg->etsrec.tcbwtable[i],
+					 r_cfg->etsrec.tsatable[i]);
+			}
+			dev_info(&pf->pdev->dev,
+				 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+				 r_cfg->pfc.willing,
+				 r_cfg->pfc.mbc,
+				 r_cfg->pfc.pfccap,
+				 r_cfg->pfc.pfcenable);
+			dev_info(&pf->pdev->dev,
+				 "remote port app_table: num_apps=%d\n",
+				 r_cfg->numapps);
+			for (i = 0; i < r_cfg->numapps; i++) {
+				dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+					 i, r_cfg->app[i].priority,
+					 r_cfg->app[i].selector,
+					 r_cfg->app[i].protocolid);
+			}
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+			dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
+			dev_info(&pf->pdev->dev, "dump stats\n");
+			dev_info(&pf->pdev->dev, "dump reset stats\n");
+			dev_info(&pf->pdev->dev, "dump port\n");
+			dev_info(&pf->pdev->dev,
+				 "dump debug fwdata <cluster_id> <table_id> <index>\n");
+		}
+
+	} else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
+		u32 level;
+		cnt = sscanf(&cmd_buf[10], "%i", &level);
+		if (cnt) {
+			if (I40E_DEBUG_USER & level) {
+				pf->hw.debug_mask = level;
+				dev_info(&pf->pdev->dev,
+					 "set hw.debug_mask = 0x%08x\n",
+					 pf->hw.debug_mask);
+			}
+			pf->msg_enable = level;
+			dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
+				 pf->msg_enable);
+		} else {
+			dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
+				 pf->msg_enable);
+		}
+	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
+		dev_info(&pf->pdev->dev, "forcing PFR\n");
+		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
+		dev_info(&pf->pdev->dev, "forcing CoreR\n");
+		i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+
+	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
+		dev_info(&pf->pdev->dev, "forcing GlobR\n");
+		i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+	} else if (strncmp(cmd_buf, "read", 4) == 0) {
+		u32 address;
+		u32 value;
+		cnt = sscanf(&cmd_buf[4], "%x", &address);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev, "read <reg>\n");
+			goto command_write_done;
+		}
+
+		/* check the range on address */
+		if (address >= I40E_MAX_REGISTER) {
+			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
+				 address);
+			goto command_write_done;
+		}
+
+		value = rd32(&pf->hw, address);
+		dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+			 address, value);
+
+	} else if (strncmp(cmd_buf, "write", 5) == 0) {
+		u32 address, value;
+		cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+			goto command_write_done;
+		}
+
+		/* check the range on address */
+		if (address >= I40E_MAX_REGISTER) {
+			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
+				 address);
+			goto command_write_done;
+		}
+		wr32(&pf->hw, address, value);
+		value = rd32(&pf->hw, address);
+		dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+			 address, value);
+	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
+		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
+			cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+			if (cnt == 0) {
+				int i;
+				for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+					if (pf->vsi[i])
+						i40e_vsi_reset_stats(pf->vsi[i]);
+				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
+			} else if (cnt == 1) {
+				vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+				if (!vsi) {
+					dev_info(&pf->pdev->dev,
+						 "clear_stats vsi: bad vsi %d\n",
+						 vsi_seid);
+					goto command_write_done;
+				}
+				i40e_vsi_reset_stats(vsi);
+				dev_info(&pf->pdev->dev,
+					 "vsi clear stats called for vsi %d\n",
+					 vsi_seid);
+			} else {
+				dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
+			}
+		} else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
+			i40e_pf_reset_stats(pf);
+			dev_info(&pf->pdev->dev, "pf clear stats called\n");
+		} else {
+			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+		}
+	} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
+		   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
+		struct i40e_fdir_data fd_data;
+		int ret;
+		u16 packet_len, i, j = 0;
+		char *asc_packet;
+		bool add = false;
+
+		asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+				     GFP_KERNEL);
+		if (!asc_packet)
+			goto command_write_done;
+
+		fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+					     GFP_KERNEL);
+
+		if (!fd_data.raw_packet) {
+			kfree(asc_packet);
+			asc_packet = NULL;
+			goto command_write_done;
+		}
+
+		if (strncmp(cmd_buf, "add", 3) == 0)
+			add = true;
+		cnt = sscanf(&cmd_buf[13],
+			     "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+			     &fd_data.q_index,
+			     &fd_data.flex_off, &fd_data.pctype,
+			     &fd_data.dest_vsi, &fd_data.dest_ctl,
+			     &fd_data.fd_status, &fd_data.cnt_index,
+			     &fd_data.fd_id, &packet_len, asc_packet);
+		if (cnt != 10) {
+			dev_info(&pf->pdev->dev,
+				 "program fd_filter: bad command string, cnt=%d\n",
+				 cnt);
+			kfree(asc_packet);
+			asc_packet = NULL;
+			kfree(fd_data.raw_packet);
+			goto command_write_done;
+		}
+
+		/* fix packet length if user entered 0 */
+		if (packet_len == 0)
+			packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+
+		/* make sure to check the max as well */
+		packet_len = min_t(u16,
+				   packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+
+		dev_info(&pf->pdev->dev, "FD raw packet:\n");
+		for (i = 0; i < packet_len; i++) {
+			sscanf(&asc_packet[j], "%2hhx ",
+			       &fd_data.raw_packet[i]);
+			j += 3;
+			snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
+			print_buf += 3;
+			if ((i % 16) == 15) {
+				snprintf(print_buf, 1, "\n");
+				print_buf++;
+			}
+		}
+		dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+		ret = i40e_program_fdir_filter(&fd_data, pf, add);
+		if (!ret) {
+			dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "Filter command send failed %d\n", ret);
+		}
+		kfree(fd_data.raw_packet);
+		fd_data.raw_packet = NULL;
+		kfree(asc_packet);
+		asc_packet = NULL;
+	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
+			int ret;
+			ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Stop LLDP AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				goto command_write_done;
+			}
+		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
+			int ret;
+			ret = i40e_aq_start_lldp(&pf->hw, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Start LLDP AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				goto command_write_done;
+			}
+		} else if (strncmp(&cmd_buf[5],
+			   "get local", 9) == 0) {
+			int ret, i;
+			u8 *buff;
+			u16 llen, rlen;
+			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+			if (!buff)
+				goto command_write_done;
+
+			ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
+						   I40E_AQ_LLDP_MIB_LOCAL,
+						   buff, I40E_LLDPDU_SIZE,
+						   &llen, &rlen, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Get LLDP MIB (local) AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				kfree(buff);
+				buff = NULL;
+				goto command_write_done;
+			}
+			dev_info(&pf->pdev->dev,
+				 "Get LLDP MIB (local) AQ buffer written back:\n");
+			for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+				snprintf(print_buf, 3, "%02x ", buff[i]);
+				print_buf += 3;
+				if ((i % 16) == 15) {
+					snprintf(print_buf, 2, "\n");
+					print_buf++;
+				}
+			}
+			dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+			kfree(buff);
+			buff = NULL;
+		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+			int ret, i;
+			u8 *buff;
+			u16 llen, rlen;
+			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+			if (!buff)
+				goto command_write_done;
+
+			ret = i40e_aq_get_lldp_mib(&pf->hw,
+					I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+					I40E_AQ_LLDP_MIB_REMOTE,
+					buff, I40E_LLDPDU_SIZE,
+					&llen, &rlen, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				kfree(buff);
+				buff = NULL;
+				goto command_write_done;
+			}
+			dev_info(&pf->pdev->dev,
+				 "Get LLDP MIB (remote) AQ buffer written back:\n");
+			for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+				snprintf(print_buf, 3, "%02x ", buff[i]);
+				print_buf += 3;
+				if ((i % 16) == 15) {
+					snprintf(print_buf, 2, "\n");
+					print_buf++;
+				}
+			}
+			dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+			kfree(buff);
+			buff = NULL;
+		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
+			int ret;
+			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+								true, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				goto command_write_done;
+			}
+		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
+			int ret;
+			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+								false, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
+					 pf->hw.aq.asq_last_status);
+				goto command_write_done;
+			}
+		}
+	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
+		u16 buffer_len, i, bytes;
+		u16 module;
+		u32 offset;
+		u16 *buff;
+		int ret;
+
+		cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
+			     &module, &offset, &buffer_len);
+		if (cnt == 0) {
+			module = 0;
+			offset = 0;
+			buffer_len = 0;
+		} else if (cnt == 1) {
+			offset = 0;
+			buffer_len = 0;
+		} else if (cnt == 2) {
+			buffer_len = 0;
+		} else if (cnt > 3) {
+			dev_info(&pf->pdev->dev,
+				 "nvm read: bad command string, cnt=%d\n", cnt);
+			goto command_write_done;
+		}
+
+		/* Read at least 512 words */
+		if (buffer_len == 0)
+			buffer_len = 512;
+
+		bytes = 2 * buffer_len;
+		buff = kzalloc(bytes, GFP_KERNEL);
+		if (!buff)
+			goto command_write_done;
+
+		ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+				 ret, pf->hw.aq.asq_last_status);
+			kfree(buff);
+			goto command_write_done;
+		}
+
+		ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
+				       bytes, (u8 *)buff, true, NULL);
+		i40e_release_nvm(&pf->hw);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Read NVM AQ failed err=%d status=0x%x\n",
+				 ret, pf->hw.aq.asq_last_status);
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "Read NVM module=0x%x offset=0x%x words=%d\n",
+				 module, offset, buffer_len);
+			for (i = 0; i < buffer_len; i++) {
+				if ((i % 16) == 0) {
+					snprintf(print_buf, 14, "\n0x%08x: ",
+						 offset + i);
+					print_buf += 13;
+				}
+				snprintf(print_buf, 5, "%04x ", buff[i]);
+				print_buf += 5;
+			}
+			dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+		}
+		kfree(buff);
+		buff = NULL;
+	} else {
+		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+		dev_info(&pf->pdev->dev, "available commands\n");
+		dev_info(&pf->pdev->dev, "  add vsi [relay_seid]\n");
+		dev_info(&pf->pdev->dev, "  del vsi [vsi_seid]\n");
+		dev_info(&pf->pdev->dev, "  add relay <uplink_seid> <vsi_seid>\n");
+		dev_info(&pf->pdev->dev, "  del relay <relay_seid>\n");
+		dev_info(&pf->pdev->dev, "  add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+		dev_info(&pf->pdev->dev, "  del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+		dev_info(&pf->pdev->dev, "  add pvid <vsi_seid> <vid>\n");
+		dev_info(&pf->pdev->dev, "  del pvid <vsi_seid>\n");
+		dev_info(&pf->pdev->dev, "  dump switch\n");
+		dev_info(&pf->pdev->dev, "  dump vsi [seid]\n");
+		dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+		dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+		dev_info(&pf->pdev->dev, "  dump desc aq\n");
+		dev_info(&pf->pdev->dev, "  dump stats\n");
+		dev_info(&pf->pdev->dev, "  dump reset stats\n");
+		dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
+		dev_info(&pf->pdev->dev, "  read <reg>\n");
+		dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
+		dev_info(&pf->pdev->dev, "  clear_stats vsi [seid]\n");
+		dev_info(&pf->pdev->dev, "  clear_stats pf\n");
+		dev_info(&pf->pdev->dev, "  pfr\n");
+		dev_info(&pf->pdev->dev, "  corer\n");
+		dev_info(&pf->pdev->dev, "  globr\n");
+		dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+		dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+		dev_info(&pf->pdev->dev, "  lldp start\n");
+		dev_info(&pf->pdev->dev, "  lldp stop\n");
+		dev_info(&pf->pdev->dev, "  lldp get local\n");
+		dev_info(&pf->pdev->dev, "  lldp get remote\n");
+		dev_info(&pf->pdev->dev, "  lldp event on\n");
+		dev_info(&pf->pdev->dev, "  lldp event off\n");
+		dev_info(&pf->pdev->dev, "  nvm read [module] [word_offset] [word_count]\n");
+	}
+
+command_write_done:
+	kfree(cmd_buf);
+	cmd_buf = NULL;
+	kfree(print_buf_start);
+	print_buf = NULL;
+	print_buf_start = NULL;
+	return count;
+}
+
+static const struct file_operations i40e_dbg_command_fops = {
+	.owner = THIS_MODULE,
+	.open =  simple_open,
+	.read =  i40e_dbg_command_read,
+	.write = i40e_dbg_command_write,
+};
+
+/**************************************************************
+ * netdev_ops
+ * The netdev_ops entry in debugfs is for giving the driver commands
+ * to be executed from the netdev operations.
+ **************************************************************/
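+/* Example (path illustrative; the per-PF directory is created in
+ * i40e_dbg_pf_init()):
+ *   echo "tx_timeout <vsi_seid>" > /sys/kernel/debug/i40e/<pci_id>/netdev_ops
+ */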
+static char i40e_dbg_netdev_ops_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_netdev_ops - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+					size_t count, loff_t *ppos)
+{
+	struct i40e_pf *pf = filp->private_data;
+	int bytes_not_copied;
+	int buf_size = 256;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+	if (count < buf_size)
+		return -ENOSPC;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len = snprintf(buf, buf_size, "%s: %s\n",
+		       pf->vsi[pf->lan_vsi]->netdev->name,
+		       i40e_dbg_netdev_ops_buf);
+
+	bytes_not_copied = copy_to_user(buffer, buf, len);
+	kfree(buf);
+
+	if (bytes_not_copied < 0)
+		return bytes_not_copied;
+
+	*ppos = len;
+	return len;
+}
+
+/**
+ * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+					 const char __user *buffer,
+					 size_t count, loff_t *ppos)
+{
+	struct i40e_pf *pf = filp->private_data;
+	int bytes_not_copied;
+	struct i40e_vsi *vsi;
+	int vsi_seid;
+	int i, cnt;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+		return -ENOSPC;
+
+	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+					  buffer, count);
+	if (bytes_not_copied < 0)
+		return bytes_not_copied;
+	else if (bytes_not_copied > 0)
+		count -= bytes_not_copied;
+	i40e_dbg_netdev_ops_buf[count] = '\0';
+
+	if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
+			goto netdev_ops_write_done;
+		}
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "tx_timeout: VSI %d not found\n", vsi_seid);
+			goto netdev_ops_write_done;
+		}
+		if (rtnl_trylock()) {
+			vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
+			rtnl_unlock();
+			dev_info(&pf->pdev->dev, "tx_timeout called\n");
+		} else {
+			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+		}
+	} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+		int mtu;
+		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+			     &vsi_seid, &mtu);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+			goto netdev_ops_write_done;
+		}
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "change_mtu: VSI %d not found\n", vsi_seid);
+			goto netdev_ops_write_done;
+		}
+		if (rtnl_trylock()) {
+			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
+								mtu);
+			rtnl_unlock();
+			dev_info(&pf->pdev->dev, "change_mtu called\n");
+		} else {
+			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+		}
+
+	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+			goto netdev_ops_write_done;
+		}
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev,
+				 "set_rx_mode: VSI %d not found\n", vsi_seid);
+			goto netdev_ops_write_done;
+		}
+		if (rtnl_trylock()) {
+			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
+			rtnl_unlock();
+			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
+		} else {
+			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+		}
+
+	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+			goto netdev_ops_write_done;
+		}
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
+				 vsi_seid);
+			goto netdev_ops_write_done;
+		}
+		for (i = 0; i < vsi->num_q_vectors; i++)
+			napi_schedule(&vsi->q_vectors[i].napi);
+		dev_info(&pf->pdev->dev, "napi called\n");
+	} else {
+		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+			 i40e_dbg_netdev_ops_buf);
+		dev_info(&pf->pdev->dev, "available commands\n");
+		dev_info(&pf->pdev->dev, "  tx_timeout <vsi_seid>\n");
+		dev_info(&pf->pdev->dev, "  change_mtu <vsi_seid> <mtu>\n");
+		dev_info(&pf->pdev->dev, "  set_rx_mode <vsi_seid>\n");
+		dev_info(&pf->pdev->dev, "  napi <vsi_seid>\n");
+	}
+netdev_ops_write_done:
+	return count;
+}
+
+static const struct file_operations i40e_dbg_netdev_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i40e_dbg_netdev_ops_read,
+	.write = i40e_dbg_netdev_ops_write,
+};
+
+/**
+ * i40e_dbg_pf_init - setup the debugfs directory for the pf
+ * @pf: the pf that is starting up
+ **/
+void i40e_dbg_pf_init(struct i40e_pf *pf)
+{
+	struct dentry *pfile __attribute__((unused));
+	const char *name = pci_name(pf->pdev);
+
+	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
+	if (pf->i40e_dbg_pf) {
+		pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
+					    pf, &i40e_dbg_command_fops);
+		pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+					    &i40e_dbg_dump_fops);
+		pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
+					    pf, &i40e_dbg_netdev_ops_fops);
+	} else {
+		dev_info(&pf->pdev->dev,
+			 "debugfs entry for %s failed\n", name);
+	}
+}
+
+/**
+ * i40e_dbg_pf_exit - clear out the pf's debugfs entries
+ * @pf: the pf that is stopping
+ **/
+void i40e_dbg_pf_exit(struct i40e_pf *pf)
+{
+	debugfs_remove_recursive(pf->i40e_dbg_pf);
+	pf->i40e_dbg_pf = NULL;
+
+	kfree(i40e_dbg_dump_buf);
+	i40e_dbg_dump_buf = NULL;
+}
+
+/**
+ * i40e_dbg_init - start up debugfs for the driver
+ **/
+void i40e_dbg_init(void)
+{
+	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+	if (!i40e_dbg_root)
+		pr_info("init of debugfs failed\n");
+}
+
+/**
+ * i40e_dbg_exit - clean out the driver's debugfs entries
+ **/
+void i40e_dbg_exit(void)
+{
+	debugfs_remove_recursive(i40e_dbg_root);
+	i40e_dbg_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
new file mode 100644
index 0000000..de25514
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+							u32 reg, u32 mask)
+{
+	const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
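+	/* alternating-bit patterns plus all-zeros and all-ones drive
+	 * every maskable bit through both states
+	 */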
+	u32 pat, val, orig_val;
+	int i;
+
+	orig_val = rd32(hw, reg);
+	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+		pat = patterns[i];
+		wr32(hw, reg, (pat & mask));
+		val = rd32(hw, reg);
+		if ((val & mask) != (pat & mask)) {
+			i40e_debug(hw, I40E_DEBUG_DIAG,
+				   "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+				   __func__, reg, pat, val);
+			return I40E_ERR_DIAG_TEST_FAILED;
+		}
+	}
+
+	wr32(hw, reg, orig_val);
+	val = rd32(hw, reg);
+	if (val != orig_val) {
+		i40e_debug(hw, I40E_DEBUG_DIAG,
+			   "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
+			   __func__, reg, orig_val, val);
+		return I40E_ERR_DIAG_TEST_FAILED;
+	}
+
+	return 0;
+}
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+	/* offset               mask         elements   stride */
+	{I40E_QTX_CTL(0),       0x0000FFBF,  64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+	{I40E_PFINT_ITR0(0),    0x00000FFF,   3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+	{I40E_PFINT_ITRN(0, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+	{I40E_PFINT_ITRN(1, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+	{I40E_PFINT_ITRN(2, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+	{I40E_PFINT_STAT_CTL0,  0x0000000C,   1, 0},
+	{I40E_PFINT_LNKLST0,    0x00001FFF,   1, 0},
+	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+	{I40E_QINT_TQCTL(0),    0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+	{I40E_QINT_RQCTL(0),    0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+	{I40E_PFINT_ICR0_ENA,   0xF7F20000,   1, 0},
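+	/* zeroed sentinel: i40e_diag_reg_test() stops when offset == 0 */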
+	{ 0 }
+};
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+	u32 reg, mask;
+	u32 i, j;
+
+	for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+		mask = i40e_reg_list[i].mask;
+		for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+			reg = i40e_reg_list[i].offset +
+			      (j * i40e_reg_list[i].stride);
+			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+		}
+	}
+
+	return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	u16 reg_val;
+
+	/* read the NVM control word and, if the NVM is valid,
+	 * validate the EEPROM checksum
+	 */
+	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+	if ((!ret_code) &&
+	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
+		ret_code = i40e_validate_nvm_checksum(hw, NULL);
+	} else {
+		ret_code = I40E_ERR_DIAG_TEST_FAILED;
+	}
+
+	return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
new file mode 100644
index 0000000..3d98277
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+	I40E_LB_MODE_NONE = 0,
+	I40E_LB_MODE_PHY_LOCAL,
+	I40E_LB_MODE_PHY_REMOTE,
+	I40E_LB_MODE_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+	u32 offset;	/* the base register */
+	u32 mask;	/* bits that can be tested */
+	u32 elements;	/* number of elements if array */
+	u32 stride;	/* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
new file mode 100644
index 0000000..9a76b8c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -0,0 +1,1449 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40e */
+
+#include "i40e.h"
+#include "i40e_diag.h"
+
+struct i40e_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define I40E_STAT(_type, _name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+	.stat_offset = offsetof(_type, _stat) \
+}
+#define I40E_NETDEV_STAT(_net_stat) \
+		I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+#define I40E_PF_STAT(_name, _stat) \
+		I40E_STAT(struct i40e_pf, _name, _stat)
+#define I40E_VSI_STAT(_name, _stat) \
+		I40E_STAT(struct i40e_vsi, _name, _stat)
+
+static const struct i40e_stats i40e_gstrings_net_stats[] = {
+	I40E_NETDEV_STAT(rx_packets),
+	I40E_NETDEV_STAT(tx_packets),
+	I40E_NETDEV_STAT(rx_bytes),
+	I40E_NETDEV_STAT(tx_bytes),
+	I40E_NETDEV_STAT(rx_errors),
+	I40E_NETDEV_STAT(tx_errors),
+	I40E_NETDEV_STAT(rx_dropped),
+	I40E_NETDEV_STAT(tx_dropped),
+	I40E_NETDEV_STAT(multicast),
+	I40E_NETDEV_STAT(collisions),
+	I40E_NETDEV_STAT(rx_length_errors),
+	I40E_NETDEV_STAT(rx_crc_errors),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they are separate.  This device supports Virtualization, and
+ * as such might have several netdevs supporting VMDq and FCoE going
+ * through a single port.  The NETDEV_STATs are for individual netdevs
+ * seen at the top of the stack, and the PF_STATs are for the physical
+ * function at the bottom of the stack hosting those netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
+ */
+static struct i40e_stats i40e_gstrings_stats[] = {
+	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
+	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
+	I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
+	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
+	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
+	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
+	I40E_PF_STAT("crc_errors", stats.crc_errors),
+	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
+	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
+	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
+	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
+	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
+	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
+	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
+	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
+	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
+	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
+	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
+	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
+	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
+	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
+	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
+	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
+	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
+	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
+	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
+	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
+	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
+	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
+	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
+	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+};
+
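+/* each Tx ring and each Rx ring contributes two u64 counters
+ * (packets and bytes) to the per-queue stats
+ */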
+#define I40E_QUEUE_STATS_LEN(n) \
+  ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
+    ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
+#define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
+				 I40E_QUEUE_STATS_LEN((n)))
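+/* PFC stats are five per-priority u64 arrays; summing their sizes and
+ * dividing by sizeof(u64) gives the number of individual counters
+ */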
+#define I40E_PFC_STATS_LEN ( \
+		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
+		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
+		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
+		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
+		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
+		 / sizeof(u64))
+#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
+				 I40E_PFC_STATS_LEN + \
+				 I40E_VSI_STATS_LEN((n)))
+
+enum i40e_ethtool_test_id {
+	I40E_ETH_TEST_REG = 0,
+	I40E_ETH_TEST_EEPROM,
+	I40E_ETH_TEST_INTR,
+	I40E_ETH_TEST_LOOPBACK,
+	I40E_ETH_TEST_LINK,
+};
+
+static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)",
+	"Eeprom test    (offline)",
+	"Interrupt test (offline)",
+	"Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+
+#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+	u32 link_speed = hw_link_info->link_speed;
+
+	/* hardware is either in 40G mode or 10G mode
+	 * NOTE: this section initializes supported and advertising
+	 */
+	switch (hw_link_info->phy_type) {
+	case I40E_PHY_TYPE_40GBASE_CR4:
+	case I40E_PHY_TYPE_40GBASE_CR4_CU:
+		ecmd->supported = SUPPORTED_40000baseCR4_Full;
+		ecmd->advertising = ADVERTISED_40000baseCR4_Full;
+		break;
+	case I40E_PHY_TYPE_40GBASE_KR4:
+		ecmd->supported = SUPPORTED_40000baseKR4_Full;
+		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+		break;
+	case I40E_PHY_TYPE_40GBASE_SR4:
+		ecmd->supported = SUPPORTED_40000baseSR4_Full;
+		ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+		break;
+	case I40E_PHY_TYPE_40GBASE_LR4:
+		ecmd->supported = SUPPORTED_40000baseLR4_Full;
+		ecmd->advertising = ADVERTISED_40000baseLR4_Full;
+		break;
+	case I40E_PHY_TYPE_10GBASE_KX4:
+		ecmd->supported = SUPPORTED_10000baseKX4_Full;
+		ecmd->advertising = ADVERTISED_10000baseKX4_Full;
+		break;
+	case I40E_PHY_TYPE_10GBASE_KR:
+		ecmd->supported = SUPPORTED_10000baseKR_Full;
+		ecmd->advertising = ADVERTISED_10000baseKR_Full;
+		break;
+	case I40E_PHY_TYPE_10GBASE_T:
+	default:
+		ecmd->supported = SUPPORTED_10000baseT_Full;
+		ecmd->advertising = ADVERTISED_10000baseT_Full;
+		break;
+	}
+
+	/* for now just say autoneg all the time */
+	ecmd->supported |= SUPPORTED_Autoneg;
+
+	if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+		ecmd->supported |= SUPPORTED_Backplane;
+		ecmd->advertising |= ADVERTISED_Backplane;
+		ecmd->port = PORT_NONE;
+	} else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+		ecmd->supported |= SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->port = PORT_TP;
+	} else {
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+	}
+
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	if (link_up) {
+		switch (link_speed) {
+		case I40E_LINK_SPEED_40GB:
+			/* need a SPEED_40000 in ethtool.h */
+			ethtool_cmd_speed_set(ecmd, 40000);
+			break;
+		case I40E_LINK_SPEED_10GB:
+			ethtool_cmd_speed_set(ecmd, SPEED_10000);
+			break;
+		default:
+			break;
+		}
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: ethtool pauseparam structure to fill with the Tx/Rx pause status
+ **/
+static void i40e_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+	pause->autoneg =
+		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+		  AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	pause->rx_pause = 0;
+	pause->tx_pause = 0;
+	if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+		pause->rx_pause = 1;
+	if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
+		pause->tx_pause = 1;
+}
+
+static u32 i40e_get_msglevel(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	return pf->msg_enable;
+}
+
+static void i40e_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	if (I40E_DEBUG_USER & data)
+		pf->hw.debug_mask = data;
+	pf->msg_enable = data;
+}
+
+static int i40e_get_regs_len(struct net_device *netdev)
+{
+	int reg_count = 0;
+	int i;
+
+	for (i = 0; i40e_reg_list[i].offset != 0; i++)
+		reg_count += i40e_reg_list[i].elements;
+
+	return reg_count * sizeof(u32);
+}
+
+static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+			  void *p)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 *reg_buf = p;
+	int i, j, ri;
+	u32 reg;
+
+	/* Tell ethtool which driver-version-specific regs output we have.
+	 *
+	 * At some point, if we have ethtool doing special formatting of
+	 * this data, it will rely on this version number to know how to
+	 * interpret things.  Hence, this needs to be updated if/when the
+	 * diags register table is changed.
+	 */
+	regs->version = 1;
+
+	/* loop through the diags reg table for what to print */
+	ri = 0;
+	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
+		for (j = 0; j < i40e_reg_list[i].elements; j++) {
+			reg = i40e_reg_list[i].offset
+				+ (j * i40e_reg_list[i].stride);
+			reg_buf[ri++] = rd32(hw, reg);
+		}
+	}
+
+}
+
+static int i40e_get_eeprom(struct net_device *netdev,
+			   struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_hw *hw = &np->vsi->back->hw;
+	int first_word, last_word;
+	u16 i, eeprom_len;
+	u16 *eeprom_buff;
+	int ret_val = 0;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
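+	/* the NVM is 16-bit word addressable; convert the byte-based
+	 * ethtool offset/len into an inclusive range of words
+	 */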
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_len = last_word - first_word + 1;
+
+	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
+					   eeprom_buff);
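+	/* i40e_read_nvm_buffer() updates eeprom_len to the number of
+	 * words actually read, so zero is treated as an access failure
+	 */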
+	if (eeprom_len == 0) {
+		kfree(eeprom_buff);
+		return -EACCES;
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < eeprom_len; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int i40e_get_eeprom_len(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_hw *hw = &np->vsi->back->hw;
+
+	return hw->nvm.sr_size * 2;
+}
+
+static void i40e_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *drvinfo)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+
+	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, i40e_driver_version_str,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+		sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void i40e_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = vsi->rx_rings[0].count;
+	ring->tx_pending = vsi->tx_rings[0].count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int i40e_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+{
+	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u32 new_rx_count, new_tx_count;
+	int i, err = 0;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       I40E_MIN_NUM_DESCRIPTORS,
+			       I40E_MAX_NUM_DESCRIPTORS);
+	new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       I40E_MIN_NUM_DESCRIPTORS,
+			       I40E_MAX_NUM_DESCRIPTORS);
+	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+	/* if nothing to do return success */
+	if ((new_tx_count == vsi->tx_rings[0].count) &&
+	    (new_rx_count == vsi->rx_rings[0].count))
+		return 0;
+
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+		usleep_range(1000, 2000);
+
+	if (!netif_running(vsi->netdev)) {
+		/* simple case - set for the next time the netdev is started */
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			vsi->tx_rings[i].count = new_tx_count;
+			vsi->rx_rings[i].count = new_rx_count;
+		}
+		goto done;
+	}
+
+	/* We can't just free everything and then setup again,
+	 * because the ISRs in MSI-X mode get passed pointers
+	 * to the Tx and Rx ring structs.
+	 */
+
+	/* alloc updated Tx resources */
+	if (new_tx_count != vsi->tx_rings[0].count) {
+		netdev_info(netdev,
+			    "Changing Tx descriptor count from %d to %d.\n",
+			    vsi->tx_rings[0].count, new_tx_count);
+		tx_rings = kcalloc(vsi->alloc_queue_pairs,
+				   sizeof(struct i40e_ring), GFP_KERNEL);
+		if (!tx_rings) {
+			err = -ENOMEM;
+			goto done;
+		}
+
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			/* clone ring and setup updated count */
+			tx_rings[i] = vsi->tx_rings[i];
+			tx_rings[i].count = new_tx_count;
+			err = i40e_setup_tx_descriptors(&tx_rings[i]);
+			if (err) {
+				while (i) {
+					i--;
+					i40e_free_tx_resources(&tx_rings[i]);
+				}
+				kfree(tx_rings);
+				tx_rings = NULL;
+
+				goto done;
+			}
+		}
+	}
+
+	/* alloc updated Rx resources */
+	if (new_rx_count != vsi->rx_rings[0].count) {
+		netdev_info(netdev,
+			    "Changing Rx descriptor count from %d to %d\n",
+			    vsi->rx_rings[0].count, new_rx_count);
+		rx_rings = kcalloc(vsi->alloc_queue_pairs,
+				   sizeof(struct i40e_ring), GFP_KERNEL);
+		if (!rx_rings) {
+			err = -ENOMEM;
+			goto free_tx;
+		}
+
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			/* clone ring and setup updated count */
+			rx_rings[i] = vsi->rx_rings[i];
+			rx_rings[i].count = new_rx_count;
+			err = i40e_setup_rx_descriptors(&rx_rings[i]);
+			if (err) {
+				while (i) {
+					i--;
+					i40e_free_rx_resources(&rx_rings[i]);
+				}
+				kfree(rx_rings);
+				rx_rings = NULL;
+
+				goto free_tx;
+			}
+		}
+	}
+
+	/* Bring interface down, copy in the new ring info,
+	 * then restore the interface
+	 */
+	i40e_down(vsi);
+
+	if (tx_rings) {
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			i40e_free_tx_resources(&vsi->tx_rings[i]);
+			vsi->tx_rings[i] = tx_rings[i];
+		}
+		kfree(tx_rings);
+		tx_rings = NULL;
+	}
+
+	if (rx_rings) {
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			i40e_free_rx_resources(&vsi->rx_rings[i]);
+			vsi->rx_rings[i] = rx_rings[i];
+		}
+		kfree(rx_rings);
+		rx_rings = NULL;
+	}
+
+	i40e_up(vsi);
+
+free_tx:
+	/* error cleanup if the Rx allocations failed after getting Tx */
+	if (tx_rings) {
+		for (i = 0; i < vsi->num_queue_pairs; i++)
+			i40e_free_tx_resources(&tx_rings[i]);
+		kfree(tx_rings);
+		tx_rings = NULL;
+	}
+
+done:
+	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+
+	return err;
+}
+
+static int i40e_get_sset_count(struct net_device *netdev, int sset)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+
+	switch (sset) {
+	case ETH_SS_TEST:
+		return I40E_TEST_LEN;
+	case ETH_SS_STATS:
+		if (vsi == pf->vsi[pf->lan_vsi])
+			return I40E_PF_STATS_LEN(netdev);
+		else
+			return I40E_VSI_STATS_LEN(netdev);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void i40e_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int i = 0;
+	char *p;
+	int j;
+	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+
+	i40e_update_stats(vsi);
+
+	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
+		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
+		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+	for (j = 0; j < vsi->num_queue_pairs; j++) {
+		data[i++] = vsi->tx_rings[j].tx_stats.packets;
+		data[i++] = vsi->tx_rings[j].tx_stats.bytes;
+	}
+	for (j = 0; j < vsi->num_queue_pairs; j++) {
+		data[i++] = vsi->rx_rings[j].rx_stats.packets;
+		data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+	}
+	if (vsi == pf->vsi[pf->lan_vsi]) {
+		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+			data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+		}
+		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+			data[i++] = pf->stats.priority_xon_tx[j];
+			data[i++] = pf->stats.priority_xoff_tx[j];
+		}
+		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+			data[i++] = pf->stats.priority_xon_rx[j];
+			data[i++] = pf->stats.priority_xoff_rx[j];
+		}
+		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+			data[i++] = pf->stats.priority_xon_2_xoff[j];
+	}
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+			     u8 *data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	char *p = (char *)data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		for (i = 0; i < I40E_TEST_LEN; i++) {
+			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "%s",
+				 i40e_gstrings_net_stats[i].stat_string);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+			p += ETH_GSTRING_LEN;
+			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+			p += ETH_GSTRING_LEN;
+			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+		if (vsi == pf->vsi[pf->lan_vsi]) {
+			for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+				snprintf(p, ETH_GSTRING_LEN, "port.%s",
+					 i40e_gstrings_stats[i].stat_string);
+				p += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "port.tx_priority_%u_xon", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "port.tx_priority_%u_xoff", i);
+				p += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "port.rx_priority_%u_xon", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "port.rx_priority_%u_xoff", i);
+				p += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "port.rx_priority_%u_xon_2_xoff", i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static int i40e_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	return ethtool_op_get_ts_info(dev, info);
+}
+
+static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+{
+	if (i40e_get_link_status(&pf->hw))
+		*data = 0;
+	else
+		*data = 1;
+
+	return *data;
+}
+
+static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+{
+	i40e_status ret;
+
+	ret = i40e_diag_reg_test(&pf->hw);
+	*data = ret;
+
+	return ret;
+}
+
+static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+{
+	i40e_status ret;
+
+	ret = i40e_diag_eeprom_test(&pf->hw);
+	*data = ret;
+
+	return ret;
+}
+
+static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+{
+	*data = -ENOSYS;
+
+	return *data;
+}
+
+static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+{
+	*data = -ENOSYS;
+
+	return *data;
+}
+
+static void i40e_diag_test(struct net_device *netdev,
+			   struct ethtool_test *eth_test, u64 *data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	set_bit(__I40E_TESTING, &pf->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		netdev_info(netdev, "offline testing starting\n");
+
+		/* Link test performed before hardware reset
+		 * so autoneg doesn't interfere with test result
+		 */
+		netdev_info(netdev, "link test starting\n");
+		if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		netdev_info(netdev, "register test starting\n");
+		if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		netdev_info(netdev, "eeprom test starting\n");
+		if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		netdev_info(netdev, "interrupt test starting\n");
+		if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		netdev_info(netdev, "loopback test starting\n");
+		if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+	} else {
+		netdev_info(netdev, "online test starting\n");
+		/* Online tests */
+		if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Offline only tests, not run in online; pass by default */
+		data[I40E_ETH_TEST_REG] = 0;
+		data[I40E_ETH_TEST_EEPROM] = 0;
+		data[I40E_ETH_TEST_INTR] = 0;
+		data[I40E_ETH_TEST_LOOPBACK] = 0;
+	}
+
+	/* testing done; clear the flag on both the offline and online paths */
+	clear_bit(__I40E_TESTING, &pf->state);
+}
+
+static void i40e_get_wol(struct net_device *netdev,
+			 struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+	/* restart autonegotiation */
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret = 0;
+
+	ret = i40e_aq_set_link_restart_an(hw, NULL);
+	if (ret) {
+		netdev_info(netdev, "link restart failed, aq_err=%d\n",
+			    pf->hw.aq.asq_last_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int i40e_set_phys_id(struct net_device *netdev,
+			    enum ethtool_phys_id_state state)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int blink_freq = 2;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		pf->led_status = i40e_led_get(hw);
+		return blink_freq;
+	case ETHTOOL_ID_ON:
+		i40e_led_set(hw, 0xF);
+		break;
+	case ETHTOOL_ID_OFF:
+		i40e_led_set(hw, 0x0);
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		i40e_led_set(hw, pf->led_status);
+		break;
+	}
+
+	return 0;
+}
+
+/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
+ * Throttle Rate (ITR), i.e. ITR(1) = 2us and ITR(10) = 20us; likewise,
+ * 125us (8000 interrupts per second) == ITR(62).
+ */
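+
+/* A sketch of the conversion, assuming ITR_TO_REG() halves the
+ * microsecond value: rx-usecs=50 is stored as vsi->rx_itr_setting = 50
+ * and programmed into the register as 25 (2us units).
+ */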
+
+static int i40e_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
+	ec->rx_max_coalesced_frames_irq = vsi->work_limit;
+
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+		ec->rx_coalesce_usecs = 1;
+	else
+		ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+		ec->tx_coalesce_usecs = 1;
+	else
+		ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+	return 0;
+}
+
+static int i40e_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_q_vector *q_vector;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u16 vector;
+	int i;
+
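+	/* note: a single work limit is shared by Tx and Rx, and the
+	 * Tx value is the one applied
+	 */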
+	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+	switch (ec->rx_coalesce_usecs) {
+	case 0:
+		vsi->rx_itr_setting = 0;
+		break;
+	case 1:
+		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+		break;
+	default:
+		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+			return -EINVAL;
+		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+		break;
+	}
+
+	switch (ec->tx_coalesce_usecs) {
+	case 0:
+		vsi->tx_itr_setting = 0;
+		break;
+	case 1:
+		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+		break;
+	default:
+		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+			return -EINVAL;
+		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+		break;
+	}
+
+	vector = vsi->base_vector;
+	q_vector = vsi->q_vectors;
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
+		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+		i40e_flush(hw);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* Report default options for RSS on i40e */
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	/* fall through to add IP fields */
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	/* fall through to add IP fields */
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: buffer to store rule locations (not filled in by this driver)
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+			  u32 *rule_locs)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = vsi->alloc_queue_pairs;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		ret = i40e_get_rss_hash_opts(pf, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		cmd->data = 500;
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
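+ *
+ * For example, "ethtool -N <dev> rx-flow-hash tcp4 sdfn" arrives here
+ * as flow_type == TCP_V4_FLOW with nfc->data carrying RXH_IP_SRC |
+ * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3.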
+ **/
+static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
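+
+	/* hena is a 64-bit hash-enable mask split across two 32-bit
+	 * registers; bit N enables RSS for packet classifier type N
+	 */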
+
+	/* RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	/* We need at least the IP SRC and DEST fields for hashing */
+	if (!(nfc->data & RXH_IP_SRC) ||
+	    !(nfc->data & RXH_IP_DST))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case TCP_V6_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V4_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &=
+			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |=
+			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)  |
+			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V6_FLOW:
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			hena &=
+			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			hena |=
+			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)  |
+			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		if ((nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		break;
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if ((nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		break;
+	case IPV4_FLOW:
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		break;
+	case IPV6_FLOW:
+		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+	i40e_flush(hw);
+
+	return 0;
+}
+
+#define IP_HEADER_OFFSET 14	/* length of the Ethernet header */
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+				   struct i40e_fdir_data *fd_data,
+				   struct ethtool_rx_flow_spec *fsp, bool add)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct udphdr *udp;
+	struct iphdr *ip;
+	bool err = false;
+	int ret;
+	int i;
+
+	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+	udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+	      + sizeof(struct iphdr));
+
+	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+	udp->source = fsp->h_u.tcp_ip4_spec.psrc;
+	udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+	for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+	     i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+		fd_data->pctype = i;
+		ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
+				 fd_data->pctype, ret);
+			err = true;
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "Filter OK for PCTYPE %d (ret = %d)\n",
+				 fd_data->pctype, ret);
+		}
+	}
+
+	return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+				   struct i40e_fdir_data *fd_data,
+				   struct ethtool_rx_flow_spec *fsp, bool add)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct tcphdr *tcp;
+	struct iphdr *ip;
+	bool err = false;
+	int ret;
+
+	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+	tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+	      + sizeof(struct iphdr));
+
+	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+	tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+	ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
+			 fd_data->pctype, ret);
+		err = true;
+	} else {
+		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+			 fd_data->pctype, ret);
+	}
+
+	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+	tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+	ret = i40e_program_fdir_filter(fd_data, pf, add);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
+			 fd_data->pctype, ret);
+		err = true;
+	} else {
+		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+			 fd_data->pctype, ret);
+	}
+
+	return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+				    struct i40e_fdir_data *fd_data,
+				    struct ethtool_rx_flow_spec *fsp, bool add)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+				  struct i40e_fdir_data *fd_data,
+				  struct ethtool_rx_flow_spec *fsp, bool add)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct iphdr *ip;
+	bool err = false;
+	int ret;
+	int i;
+
+	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+
+	ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
+	ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
+	ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+
+	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
+		fd_data->pctype = i;
+		ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
+				 fd_data->pctype, ret);
+			err = true;
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "Filter OK for PCTYPE %d (ret = %d)\n",
+				 fd_data->pctype, ret);
+		}
+	}
+
+	return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
+ * a specific flow spec based on their protocol
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
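+ *
+ * For example, "ethtool -N <dev> flow-type tcp4 dst-ip <ip> dst-port 80
+ * action 2" reaches this function as ETHTOOL_SRXCLSRLINS with add == true
+ * and fsp->ring_cookie == 2.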
+ **/
+static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
+			struct ethtool_rxnfc *cmd, bool add)
+{
+	struct i40e_fdir_data fd_data;
+	int ret = -EINVAL;
+	struct i40e_pf *pf;
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	if (!vsi)
+		return -EINVAL;
+
+	pf = vsi->back;
+
+	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+	    (fsp->ring_cookie >= vsi->num_queue_pairs))
+		return -EINVAL;
+
+	/* Populate the Flow Director that we have at the moment
+	 * and allocate the raw packet buffer for the calling functions
+	 */
+	fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+				     GFP_KERNEL);
+
+	if (!fd_data.raw_packet) {
+		dev_info(&pf->pdev->dev, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	fd_data.q_index = fsp->ring_cookie;
+	fd_data.flex_off = 0;
+	fd_data.pctype = 0;
+	fd_data.dest_vsi = vsi->id;
+	fd_data.dest_ctl = 0;
+	fd_data.fd_status = 0;
+	fd_data.cnt_index = 0;
+	fd_data.fd_id = 0;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case TCP_V4_FLOW:
+		ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+		break;
+	case UDP_V4_FLOW:
+		ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+		break;
+	case SCTP_V4_FLOW:
+		ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+		break;
+	case IPV4_FLOW:
+		ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+		break;
+	case IP_USER_FLOW:
+		switch (fsp->h_u.usr_ip4_spec.proto) {
+		case IPPROTO_TCP:
+			ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+			break;
+		case IPPROTO_UDP:
+			ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+			break;
+		case IPPROTO_SCTP:
+			ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+			break;
+		default:
+			ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+			break;
+		}
+		break;
+	default:
+		dev_info(&pf->pdev->dev, "Could not specify spec type\n");
+		ret = -EINVAL;
+	}
+
+	kfree(fd_data.raw_packet);
+	fd_data.raw_packet = NULL;
+
+	return ret;
+}
+
+/**
+ * i40e_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = i40e_set_rss_hash_opt(pf, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLINS:
+		ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static const struct ethtool_ops i40e_ethtool_ops = {
+	.get_settings		= i40e_get_settings,
+	.get_drvinfo		= i40e_get_drvinfo,
+	.get_regs_len		= i40e_get_regs_len,
+	.get_regs		= i40e_get_regs,
+	.nway_reset		= i40e_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_wol		= i40e_get_wol,
+	.get_eeprom_len		= i40e_get_eeprom_len,
+	.get_eeprom		= i40e_get_eeprom,
+	.get_ringparam		= i40e_get_ringparam,
+	.set_ringparam		= i40e_set_ringparam,
+	.get_pauseparam		= i40e_get_pauseparam,
+	.get_msglevel		= i40e_get_msglevel,
+	.set_msglevel		= i40e_set_msglevel,
+	.get_rxnfc		= i40e_get_rxnfc,
+	.set_rxnfc		= i40e_set_rxnfc,
+	.self_test		= i40e_diag_test,
+	.get_strings		= i40e_get_strings,
+	.set_phys_id		= i40e_set_phys_id,
+	.get_sset_count		= i40e_get_sset_count,
+	.get_ethtool_stats	= i40e_get_ethtool_stats,
+	.get_coalesce		= i40e_get_coalesce,
+	.set_coalesce		= i40e_set_coalesce,
+	.get_ts_info		= i40e_get_ts_info,
+};
+
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
new file mode 100644
index 0000000..901804a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -0,0 +1,366 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 sd_index,
+					      enum i40e_sd_entry_type type,
+					      u64 direct_mode_sz)
+{
+	enum i40e_memory_type mem_type __attribute__((unused));
+	i40e_status ret_code = 0;
+	struct i40e_hmc_sd_entry *sd_entry;
+	bool dma_mem_alloc_done = false;
+	struct i40e_dma_mem mem;
+	u64 alloc_len;
+
+	if (NULL == hmc_info->sd_table.sd_entry) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
+		goto exit;
+	}
+
+	if (sd_index >= hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
+		goto exit;
+	}
+
+	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+	if (!sd_entry->valid) {
+		if (I40E_SD_TYPE_PAGED == type) {
+			mem_type = i40e_mem_pd;
+			alloc_len = I40E_HMC_PAGED_BP_SIZE;
+		} else {
+			mem_type = i40e_mem_bp_jumbo;
+			alloc_len = direct_mode_sz;
+		}
+
+		/* allocate a 4K pd page or 2M backing page */
+		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
+		if (ret_code)
+			goto exit;
+		dma_mem_alloc_done = true;
+		if (I40E_SD_TYPE_PAGED == type) {
+			ret_code = i40e_allocate_virt_mem(hw,
+					&sd_entry->u.pd_table.pd_entry_virt_mem,
+					sizeof(struct i40e_hmc_pd_entry) * 512);
+			if (ret_code)
+				goto exit;
+			sd_entry->u.pd_table.pd_entry =
+				(struct i40e_hmc_pd_entry *)
+				sd_entry->u.pd_table.pd_entry_virt_mem.va;
+			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
+			       sizeof(struct i40e_dma_mem));
+		} else {
+			memcpy(&sd_entry->u.bp.addr, &mem,
+			       sizeof(struct i40e_dma_mem));
+			sd_entry->u.bp.sd_pd_index = sd_index;
+		}
+		/* initialize the sd entry */
+		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+		/* increment the ref count */
+		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+	}
+	/* Increment backing page reference count */
+	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+	if (ret_code)
+		if (dma_mem_alloc_done)
+			i40e_free_dma_mem(hw, &mem);
+
+	return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ *
+ * This function:
+ *	1. Initializes the pd entry
+ *	2. Adds pd_entry in the pd_table
+ *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ *	4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ *	1. The memory for the pd should be pinned down, physically
+ *	   contiguous, aligned on a 4K boundary and zeroed.
+ *	2. It should be 4K in size.
+ **/
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 pd_index)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_pd_table *pd_table;
+	struct i40e_hmc_pd_entry *pd_entry;
+	struct i40e_dma_mem mem;
+	u32 sd_idx, rel_pd_idx;
+	u64 *pd_addr;
+	u64 page_desc;
+
+	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
+		goto exit;
+	}
+
+	/* find corresponding sd */
+	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+	if (I40E_SD_TYPE_PAGED !=
+	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+		goto exit;
+
+	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+	pd_entry = &pd_table->pd_entry[rel_pd_idx];
+	if (!pd_entry->valid) {
+		/* allocate a 4K backing page */
+		ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
+						 I40E_HMC_PAGED_BP_SIZE,
+						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
+		if (ret_code)
+			goto exit;
+
+		memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+		pd_entry->bp.sd_pd_index = pd_index;
+		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+		/* Set page address and valid bit */
+		page_desc = mem.pa | 0x1;
+
+		pd_addr = (u64 *)pd_table->pd_page_addr.va;
+		pd_addr += rel_pd_idx;
+
+		/* Add the backing page physical address in the pd entry */
+		memcpy(pd_addr, &page_desc, sizeof(u64));
+
+		pd_entry->sd_index = sd_idx;
+		pd_entry->valid = true;
+		I40E_INC_PD_REFCNT(pd_table);
+	}
+	I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ *	1. Marks the entry in the pd table (for paged address mode) or in the
+ *	   sd table (for direct address mode) invalid.
+ *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ *	3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *	1. Caller can deallocate the memory used by backing storage after this
+ *	   function returns.
+ **/
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+					struct i40e_hmc_info *hmc_info,
+					u32 idx, bool is_pf)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_pd_entry *pd_entry;
+	struct i40e_hmc_pd_table *pd_table;
+	struct i40e_hmc_sd_entry *sd_entry;
+	u32 sd_idx, rel_pd_idx;
+	u64 *pd_addr;
+
+	/* calculate index */
+	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
+		goto exit;
+	}
+	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+		ret_code = I40E_ERR_INVALID_SD_TYPE;
+		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
+		goto exit;
+	}
+	/* get the entry and decrease its ref counter */
+	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+	pd_entry = &pd_table->pd_entry[rel_pd_idx];
+	I40E_DEC_BP_REFCNT(&pd_entry->bp);
+	if (pd_entry->bp.ref_cnt)
+		goto exit;
+
+	/* mark the entry invalid */
+	pd_entry->valid = false;
+	I40E_DEC_PD_REFCNT(pd_table);
+	pd_addr = (u64 *)pd_table->pd_page_addr.va;
+	pd_addr += rel_pd_idx;
+	memset(pd_addr, 0, sizeof(u64));
+	if (is_pf)
+		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+	else
+		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+
+	/* free memory here */
+	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+	if (ret_code)
+		goto exit;
+	if (!pd_table->ref_cnt)
+		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+					     u32 idx)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	/* get the entry and decrease its ref counter */
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+	if (sd_entry->u.bp.ref_cnt) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto exit;
+	}
+	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+	/* mark the entry invalid */
+	sd_entry->valid = false;
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+					    struct i40e_hmc_info *hmc_info,
+					    u32 idx, bool is_pf)
+{
+	struct i40e_hmc_sd_entry *sd_entry;
+	i40e_status ret_code = 0;
+
+	/* get the entry and decrease its ref counter */
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+	if (is_pf) {
+		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+	} else {
+		ret_code = I40E_NOT_SUPPORTED;
+		goto exit;
+	}
+	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+	if (ret_code)
+		goto exit;
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+					       u32 idx)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+	if (sd_entry->u.pd_table.ref_cnt) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto exit;
+	}
+
+	/* mark the entry invalid */
+	sd_entry->valid = false;
+
+	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 idx, bool is_pf)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+	if (is_pf) {
+		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+	} else {
+		ret_code = I40E_NOT_SUPPORTED;
+		goto exit;
+	}
+	/* free memory here */
+	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+	if (ret_code)
+		goto exit;
+exit:
+	return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
new file mode 100644
index 0000000..aacd42a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE		0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD		512
+#define I40E_HMC_DIRECT_BP_SIZE		0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE		4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT	4096
+#define I40E_FIRST_VF_FPM_ID		16
+
+struct i40e_hmc_obj_info {
+	u64 base;	/* base addr in FPM */
+	u32 max_cnt;	/* max count available for this hmc func */
+	u32 cnt;	/* count of objects driver actually wants to create */
+	u64 size;	/* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+	I40E_SD_TYPE_INVALID = 0,
+	I40E_SD_TYPE_PAGED   = 1,
+	I40E_SD_TYPE_DIRECT  = 2
+};
+
+struct i40e_hmc_bp {
+	enum i40e_sd_entry_type entry_type;
+	struct i40e_dma_mem addr; /* populate to be used by hw */
+	u32 sd_pd_index;
+	u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+	struct i40e_hmc_bp bp;
+	u32 sd_index;
+	bool valid;
+};
+
+struct i40e_hmc_pd_table {
+	struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+	struct i40e_hmc_pd_entry  *pd_entry; /* [512] for sw bookkeeping */
+	struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+	u32 ref_cnt;
+	u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+	enum i40e_sd_entry_type entry_type;
+	bool valid;
+
+	union {
+		struct i40e_hmc_pd_table pd_table;
+		struct i40e_hmc_bp bp;
+	} u;
+};
+
+struct i40e_hmc_sd_table {
+	struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+	u32 sd_cnt;
+	u32 ref_cnt;
+	struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+	u32 signature;
+	/* equals the PCI func number for the PF; dynamically allocated for VFs */
+	u8 hmc_fn_id;
+	u16 first_sd_index; /* index of the first available SD */
+
+	/* hmc objects */
+	struct i40e_hmc_obj_info *hmc_obj;
+	struct i40e_virt_mem hmc_obj_virt_mem;
+	struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp)		((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp)		((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if the sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\
+{									\
+	u32 val1, val2, val3;						\
+	val1 = (u32)(upper_32_bits(pa));				\
+	val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT <<			\
+		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
+		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
+		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
+		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
+	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
+	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if the sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\
+{									\
+	u32 val2, val3;							\
+	val2 = (I40E_HMC_MAX_BP_COUNT <<				\
+		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
+		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
+		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
+	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
+	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
+	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)			\
+	wr32((hw), I40E_PFHMC_PDINV,					\
+	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		\
+	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id)	   \
+	wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+	     (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		   \
+	      ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
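+ *
+ * For example (hypothetical numbers): with base == 0, size == 128 and
+ * index == 32768, fpm_addr is 4M, giving *sd_idx == 2 since each direct
+ * backing page spans I40E_HMC_DIRECT_BP_SIZE (2M).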
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{									\
+	u64 fpm_addr, fpm_limit;					\
+	fpm_addr = (hmc_info)->hmc_obj[(type)].base +			\
+		   (hmc_info)->hmc_obj[(type)].size * (index);		\
+	fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+	*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);		\
+	*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE);	\
+	/* add one more to the limit to correct our range */		\
+	*(sd_limit) += 1;						\
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{									\
+	u64 fpm_adr, fpm_limit;						\
+	fpm_adr = (hmc_info)->hmc_obj[(type)].base +			\
+		  (hmc_info)->hmc_obj[(type)].size * (idx);		\
+	fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);	\
+	*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE);		\
+	*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE);	\
+	/* add one more to the limit to correct our range */		\
+	*(pd_limit) += 1;						\
+}
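+
+/* The math mirrors I40E_FIND_SD_INDEX_LIMIT, only against the 4KB
+ * paged backing-page size assumed for I40E_HMC_PAGED_BP_SIZE: a
+ * one-byte object at FPM address 0x1000 yields *pd_index = 1 and
+ * *pd_limit = 2.
+ */
+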
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 sd_index,
+					      enum i40e_sd_entry_type type,
+					      u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+					struct i40e_hmc_info *hmc_info,
+					u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+					     u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+					    struct i40e_hmc_info *hmc_info,
+					    u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+					       u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
new file mode 100644
index 0000000..a695b91
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1006 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+	u64 aligned_offset = offset;
+
+	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+	return aligned_offset;
+}
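+
+/* For example, with the 512-byte I40E_HMC_L2OBJ_BASE_ALIGNMENT, an
+ * offset of 1000 (1000 % 512 == 488) is bumped by 512 - 488 to the
+ * next boundary, 1024, while an already aligned offset is returned
+ * unchanged.
+ */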
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+	u64 fpm_size = 0;
+
+	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	return fpm_size;
+}
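+
+/* The running total is re-aligned after each object class, so with
+ * (purely illustrative) 1500 bytes of Tx context the Rx contexts
+ * would start at the 512-byte-aligned offset 1536, and so on for the
+ * FCoE regions.
+ */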
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ *   - HMC Resource Profile has been selected before calling this function.
+ **/
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+					u32 rxq_num, u32 fcoe_cntx_num,
+					u32 fcoe_filt_num)
+{
+	struct i40e_hmc_obj_info *obj, *full_obj;
+	i40e_status ret_code = 0;
+	u64 l2fpm_size;
+	u32 size_exp;
+
+	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+	hw->hmc.hmc_fn_id = hw->pf_id;
+
+	/* allocate memory for hmc_obj */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+	if (ret_code)
+		goto init_lan_hmc_out;
+	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+			  hw->hmc.hmc_obj_virt_mem.va;
+
+	/* The full object will be used to create the LAN HMC SD */
+	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+	full_obj->max_cnt = 0;
+	full_obj->cnt = 0;
+	full_obj->base = 0;
+	full_obj->size = 0;
+
+	/* Tx queue context information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+	obj->cnt = txq_num;
+	obj->base = 0;
+	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+	obj->size = (u64)1 << size_exp;
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (txq_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  txq_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	/* Rx queue context information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+	obj->cnt = rxq_num;
+	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+	obj->base = i40e_align_l2obj_base(obj->base);
+	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+	obj->size = (u64)1 << size_exp;
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (rxq_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  rxq_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	/* FCoE context information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+	obj->cnt = fcoe_cntx_num;
+	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+	obj->base = i40e_align_l2obj_base(obj->base);
+	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+	obj->size = (u64)1 << size_exp;
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (fcoe_cntx_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  fcoe_cntx_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	/* FCoE filter information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+	obj->cnt = fcoe_filt_num;
+	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+	obj->base = i40e_align_l2obj_base(obj->base);
+	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+	obj->size = (u64)1 << size_exp;
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (fcoe_filt_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  fcoe_filt_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	hw->hmc.first_sd_index = 0;
+	hw->hmc.sd_table.ref_cnt = 0;
+	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+					       fcoe_filt_num);
+	if (NULL == hw->hmc.sd_table.sd_entry) {
+		hw->hmc.sd_table.sd_cnt = (u32)
+				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+				   I40E_HMC_DIRECT_BP_SIZE;
+
+		/* allocate the sd_entry members in the sd_table */
+		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+					  (sizeof(struct i40e_hmc_sd_entry) *
+					  hw->hmc.sd_table.sd_cnt));
+		if (ret_code)
+			goto init_lan_hmc_out;
+		hw->hmc.sd_table.sd_entry =
+			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+	}
+	/* store in the LAN full object for later */
+	full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+	return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ *	1. Marks the entry in the pd table (for paged address mode) invalid
+ *	2. Writes to the PMPDINV register to invalidate the backing page
+ *	   in the FV cache
+ *	3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *	1. caller can deallocate the memory used by the pd after this
+ *	   function returns.
+ **/
+static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+						 struct i40e_hmc_info *hmc_info,
+						 u32 idx)
+{
+	i40e_status ret_code = 0;
+
+	if (!i40e_prep_remove_pd_page(hmc_info, idx))
+		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+	return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ *	1. Marks the entry in the sd table (for direct address mode) invalid
+ *	2. Writes to the PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ *	   set to 0) and PMSDDATAHIGH registers to invalidate the sd page
+ *	3. Decrements the ref count for the sd_entry
+ * assumptions:
+ *	1. caller can deallocate the memory used by backing storage after
+ *	   this function returns.
+ **/
+static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+					       struct i40e_hmc_info *hmc_info,
+					       u32 idx)
+{
+	i40e_status ret_code = 0;
+
+	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+	return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+				struct i40e_hmc_lan_create_obj_info *info)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_sd_entry *sd_entry;
+	u32 pd_idx1 = 0, pd_lmt1 = 0;
+	u32 pd_idx = 0, pd_lmt = 0;
+	bool pd_error = false;
+	u32 sd_idx, sd_lmt;
+	u64 sd_size;
+	u32 i, j;
+
+	if (NULL == info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
+		goto exit;
+	}
+	if (NULL == info->hmc_info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+		goto exit;
+	}
+	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
+		goto exit;
+	}
+
+	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+	if ((info->start_idx + info->count) >
+	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+
+	/* find sd index and limit */
+	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count,
+				 &sd_idx, &sd_lmt);
+	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		goto exit;
+	}
+	/* find pd index */
+	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count, &pd_idx,
+				 &pd_lmt);
+
+	/* This covers cases where an SD with less than the full 2M of
+	 * memory is wanted. If no size is filled out, the SD size
+	 * defaults to 2M.
+	 */
+	if (info->direct_mode_sz == 0)
+		sd_size = I40E_HMC_DIRECT_BP_SIZE;
+	else
+		sd_size = info->direct_mode_sz;
+
+	/* check if all the sds are valid. If not, allocate a page and
+	 * initialize it.
+	 */
+	for (j = sd_idx; j < sd_lmt; j++) {
+		/* update the sd table entry */
+		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+						   info->entry_type,
+						   sd_size);
+		if (ret_code)
+			goto exit_sd_error;
+		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+			/* check if all the pds in this sd are valid. If not,
+			 * allocate a page and initialize it.
+			 */
+
+			/* find pd_idx and pd_lmt in this sd */
+			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+			pd_lmt1 = min(pd_lmt,
+				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+			for (i = pd_idx1; i < pd_lmt1; i++) {
+				/* update the pd table entry */
+				ret_code = i40e_add_pd_table_entry(hw,
+								info->hmc_info,
+								i);
+				if (ret_code) {
+					pd_error = true;
+					break;
+				}
+			}
+			if (pd_error) {
+				/* remove the backing pages from pd_idx1 to i */
+				while (i && (i > pd_idx1)) {
+					i40e_remove_pd_bp(hw, info->hmc_info,
+							  (i - 1), true);
+					i--;
+				}
+			}
+		}
+		if (!sd_entry->valid) {
+			sd_entry->valid = true;
+			switch (sd_entry->entry_type) {
+			case I40E_SD_TYPE_PAGED:
+				I40E_SET_PF_SD_ENTRY(hw,
+					sd_entry->u.pd_table.pd_page_addr.pa,
+					j, sd_entry->entry_type);
+				break;
+			case I40E_SD_TYPE_DIRECT:
+				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+						     j, sd_entry->entry_type);
+				break;
+			default:
+				ret_code = I40E_ERR_INVALID_SD_TYPE;
+				goto exit;
+			}
+		}
+	}
+	goto exit;
+
+exit_sd_error:
+	/* cleanup for sd entries from j to sd_idx */
+	while (j && (j > sd_idx)) {
+		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+		switch (sd_entry->entry_type) {
+		case I40E_SD_TYPE_PAGED:
+			pd_idx1 = max(pd_idx,
+				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+			for (i = pd_idx1; i < pd_lmt1; i++)
+				i40e_remove_pd_bp(hw, info->hmc_info,
+						  i, true);
+			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+			break;
+		case I40E_SD_TYPE_DIRECT:
+			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+			break;
+		default:
+			ret_code = I40E_ERR_INVALID_SD_TYPE;
+			break;
+		}
+		j--;
+	}
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ *   any LAN/FCoE HMC objects can be created.
+ **/
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+					     enum i40e_hmc_model model)
+{
+	struct i40e_hmc_lan_create_obj_info info;
+	i40e_status ret_code = 0;
+	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+	struct i40e_hmc_obj_info *obj;
+
+	/* Initialize part of the create object info struct */
+	info.hmc_info = &hw->hmc;
+	info.rsrc_type = I40E_HMC_LAN_FULL;
+	info.start_idx = 0;
+	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+	/* Build the SD entry for the LAN objects */
+	switch (model) {
+	case I40E_HMC_MODEL_DIRECT_PREFERRED:
+	case I40E_HMC_MODEL_DIRECT_ONLY:
+		info.entry_type = I40E_SD_TYPE_DIRECT;
+		/* Make one big object, a single SD */
+		info.count = 1;
+		ret_code = i40e_create_lan_hmc_object(hw, &info);
+		if ((ret_code) &&
+		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+			goto try_type_paged;
+		else if (ret_code)
+			goto configure_lan_hmc_out;
+		/* on success, fall out through the break below */
+		break;
+	case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+		info.entry_type = I40E_SD_TYPE_PAGED;
+		/* Make one big object in the PD table */
+		info.count = 1;
+		ret_code = i40e_create_lan_hmc_object(hw, &info);
+		if (ret_code)
+			goto configure_lan_hmc_out;
+		break;
+	default:
+		/* unsupported type */
+		ret_code = I40E_ERR_INVALID_SD_TYPE;
+		hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
+			  model);
+		goto configure_lan_hmc_out;
+	}
+
+	/* Configure and program the FPM registers so objects can be created */
+
+	/* Tx contexts */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+	/* Rx contexts */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+	/* FCoE contexts */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+	/* FCoE filters */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+	return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs.  It frees
+ * the memory for PDs and backing storage.  After this function returns,
+ * the caller should deallocate memory allocated previously for
+ * book-keeping information about PDs and backing storage.
+ **/
+static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+				struct i40e_hmc_lan_delete_obj_info *info)
+{
+	i40e_status ret_code = 0;
+	struct i40e_hmc_pd_table *pd_table;
+	u32 pd_idx, pd_lmt, rel_pd_idx;
+	u32 sd_idx, sd_lmt;
+	u32 i, j;
+
+	if (NULL == info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
+		goto exit;
+	}
+	if (NULL == info->hmc_info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+		goto exit;
+	}
+	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
+		goto exit;
+	}
+
+	if (NULL == info->hmc_info->sd_table.sd_entry) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
+		goto exit;
+	}
+
+	if (NULL == info->hmc_info->hmc_obj) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+		goto exit;
+	}
+	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+
+	if ((info->start_idx + info->count) >
+	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+
+	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count, &pd_idx,
+				 &pd_lmt);
+
+	for (j = pd_idx; j < pd_lmt; j++) {
+		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+		if (I40E_SD_TYPE_PAGED !=
+		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+			continue;
+
+		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+		pd_table =
+			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+		if (pd_table->pd_entry[rel_pd_idx].valid) {
+			ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
+						     j, true);
+			if (ret_code)
+				goto exit;
+		}
+	}
+
+	/* find sd index and limit */
+	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count,
+				 &sd_idx, &sd_lmt);
+	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		goto exit;
+	}
+
+	for (i = sd_idx; i < sd_lmt; i++) {
+		if (!info->hmc_info->sd_table.sd_entry[i].valid)
+			continue;
+		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+		case I40E_SD_TYPE_DIRECT:
+			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+			if (ret_code)
+				goto exit;
+			break;
+		case I40E_SD_TYPE_PAGED:
+			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+			if (ret_code)
+				goto exit;
+			break;
+		default:
+			break;
+		}
+	}
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+	struct i40e_hmc_lan_delete_obj_info info;
+	i40e_status ret_code;
+
+	info.hmc_info = &hw->hmc;
+	info.rsrc_type = I40E_HMC_LAN_FULL;
+	info.start_idx = 0;
+	info.count = 1;
+
+	/* delete the object */
+	ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+	/* free the SD table entry for LAN */
+	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+	hw->hmc.sd_table.sd_cnt = 0;
+	hw->hmc.sd_table.sd_entry = NULL;
+
+	/* free memory used for hmc_obj */
+	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+	hw->hmc.hmc_obj = NULL;
+
+	return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele)		\
+	offsetof(struct _struct, _ele),		\
+	FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+	u16 offset;
+	u16 size_of;
+	u16 width;
+	u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+					     /* Field      Width    LSB */
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
+/* line 1 */
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
+/* line 7 */
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
+	{ 0 }
+};
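+
+/* Each row pairs I40E_HMC_STORE's (offset, sizeof) with the field's
+ * width and LSB position in the hardware context; qlen, for instance,
+ * is a 13-bit field starting at bit 33 of the second 128-bit context
+ * line (LSB 33 + 128).
+ */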
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+					 /* Field      Width    LSB */
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
+	{ 0 }
+};
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw:       the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+					u8 *context_bytes,
+					enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+	/* clean the bit array */
+	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+
+	return 0;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info:  a description of the struct fields and where they sit in
+ *            the context bit array
+ * @dest:     pointer to the struct whose fields are copied into the context
+ **/
+static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+					struct i40e_context_ele *ce_info,
+					u8 *dest)
+{
+	u16 shift_width;
+	u64 bitfield;
+	u8 hi_byte;
+	u8 hi_mask;
+	u64 t_bits;
+	u64 mask;
+	u8 *p;
+	int f;
+
+	for (f = 0; ce_info[f].width != 0; f++) {
+		/* clear out the field */
+		bitfield = 0;
+
+		/* copy from the next struct field */
+		p = dest + ce_info[f].offset;
+		switch (ce_info[f].size_of) {
+		case 1:
+			bitfield = *p;
+			break;
+		case 2:
+			bitfield = cpu_to_le16(*(u16 *)p);
+			break;
+		case 4:
+			bitfield = cpu_to_le32(*(u32 *)p);
+			break;
+		case 8:
+			bitfield = cpu_to_le64(*(u64 *)p);
+			break;
+		}
+
+		/* prepare the bits and mask */
+		shift_width = ce_info[f].lsb % 8;
+		mask = ((u64)1 << ce_info[f].width) - 1;
+
+		/* save upper bytes for special case */
+		hi_mask = (u8)((mask >> 56) & 0xff);
+		hi_byte = (u8)((bitfield >> 56) & 0xff);
+
+		/* shift to correct alignment */
+		mask <<= shift_width;
+		bitfield <<= shift_width;
+
+		/* get the current bits from the target bit string */
+		p = context_bytes + (ce_info[f].lsb / 8);
+		memcpy(&t_bits, p, sizeof(u64));
+
+		t_bits &= ~mask;          /* get the bits not changing */
+		t_bits |= bitfield;       /* add in the new bits */
+
+		/* put it all back */
+		memcpy(p, &t_bits, sizeof(u64));
+
+		/* deal with the special case if needed
+		 * example: 62 bit field that starts in bit 5 of first byte
+		 *          will overlap 3 bits into byte 9
+		 */
+		if ((shift_width + ce_info[f].width) > 64) {
+			u8 byte;
+
+			hi_mask >>= (8 - shift_width);
+			hi_byte >>= (8 - shift_width);
+			byte = p[8] & ~hi_mask;  /* get the bits not changing */
+			byte |= hi_byte;         /* add in the new bits */
+			p[8] = byte;             /* put it back */
+		}
+	}
+
+	return 0;
+}
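+
+/* Packing example (illustrative numbers): a 13-bit field with
+ * lsb == 89 lands at context byte 89 / 8 == 11 with shift_width
+ * 89 % 8 == 1, so mask == (((u64)1 << 13) - 1) << 1; the hi_byte
+ * fixup above only kicks in when shift_width + width exceeds 64.
+ */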
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hmc_info: pointer to i40e_hmc_info struct
+ * @object_base: pointer to u64 to get the va
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer.  This function is used for LAN Queue contexts.
+ **/
+static
+i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+					u8 **object_base,
+					enum i40e_hmc_lan_rsrc_type rsrc_type,
+					u32 obj_idx)
+{
+	u32 obj_offset_in_sd, obj_offset_in_pd;
+	i40e_status ret_code = 0;
+	struct i40e_hmc_sd_entry *sd_entry;
+	struct i40e_hmc_pd_entry *pd_entry;
+	u32 pd_idx, pd_lmt, rel_pd_idx;
+	u64 obj_offset_in_fpm;
+	u32 sd_idx, sd_lmt;
+
+	if (NULL == hmc_info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
+		goto exit;
+	}
+	if (NULL == hmc_info->hmc_obj) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+		goto exit;
+	}
+	if (NULL == object_base) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
+		goto exit;
+	}
+	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+		ret_code = I40E_ERR_BAD_PTR;
+		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
+		goto exit;
+	}
+	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+	/* find sd index and limit */
+	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+				 &sd_idx, &sd_lmt);
+
+	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+					 &pd_idx, &pd_lmt);
+		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+					 I40E_HMC_PAGED_BP_SIZE);
+		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+	} else {
+		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+					 I40E_HMC_DIRECT_BP_SIZE);
+		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+	}
+exit:
+	return ret_code;
+}
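+
+/* Lookup example (sizes illustrative): for a 32-byte Rx context at
+ * obj_idx 5 with base 0, obj_offset_in_fpm is 160; in a paged SD that
+ * resolves to pd_entry 0 plus offset 160 within its 4KB backing page.
+ */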
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+						      u16 queue)
+{
+	i40e_status err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+				     I40E_HMC_LAN_TX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s)
+{
+	i40e_status err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+				     I40E_HMC_LAN_TX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_set_hmc_context(context_bytes,
+				    i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+						      u16 queue)
+{
+	i40e_status err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+				     I40E_HMC_LAN_RX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s)
+{
+	i40e_status err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+				     I40E_HMC_LAN_RX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_set_hmc_context(context_bytes,
+				    i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
new file mode 100644
index 0000000..00ff35006
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+	u16 head;
+	u8  cpuid;
+	u64 base;
+	u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+	u8  dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+	u8  hbuff;
+	u8  dtype;
+	u8  dsize;
+	u8  crcstrip;
+	u8  fc_ena;
+	u8  l2tsel;
+	u8  hsplit_0;
+	u8  hsplit_1;
+	u8  showiv;
+	u16 rxmax;
+	u8  tphrdesc_ena;
+	u8  tphwdesc_ena;
+	u8  tphdata_ena;
+	u8  tphhead_ena;
+	u8  lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+	u16 head;
+	u8  new_context;
+	u64 base;
+	u8  fc_ena;
+	u8  timesync_ena;
+	u8  fd_ena;
+	u8  alt_vlan_ena;
+	u16 thead_wb;
+	u16 cpuid;
+	u8  head_wb_ena;
+	u16 qlen;
+	u8  tphrdesc_ena;
+	u8  tphrpacket_ena;
+	u8  tphwdesc_ena;
+	u64 head_wb_addr;
+	u32 crc;
+	u16 rdylist;
+	u8  rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+	I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT      = 0,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2      = 1,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP      = 2,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP    = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+	u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+	u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+	I40E_HMC_LAN_OBJ_SZ_8   = 0x3,
+	I40E_HMC_LAN_OBJ_SZ_16  = 0x4,
+	I40E_HMC_LAN_OBJ_SZ_32  = 0x5,
+	I40E_HMC_LAN_OBJ_SZ_64  = 0x6,
+	I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+	I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+	I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ         128
+#define I40E_HMC_OBJ_SIZE_RXQ         32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT   32
+
+enum i40e_hmc_lan_rsrc_type {
+	I40E_HMC_LAN_FULL  = 0,
+	I40E_HMC_LAN_TX    = 1,
+	I40E_HMC_LAN_RX    = 2,
+	I40E_HMC_FCOE_CTX  = 3,
+	I40E_HMC_FCOE_FILT = 4,
+	I40E_HMC_LAN_MAX   = 5
+};
+
+enum i40e_hmc_model {
+	I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+	I40E_HMC_MODEL_DIRECT_ONLY      = 1,
+	I40E_HMC_MODEL_PAGED_ONLY       = 2,
+	I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+	struct i40e_hmc_info *hmc_info;
+	u32 rsrc_type;
+	u32 start_idx;
+	u32 count;
+	enum i40e_sd_entry_type entry_type;
+	u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+	struct i40e_hmc_info *hmc_info;
+	u32 rsrc_type;
+	u32 start_idx;
+	u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+					u32 rxq_num, u32 fcoe_cntx_num,
+					u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+					     enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+						      u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+						      u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 0000000..601d482
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,7375 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Local includes */
+#include "i40e.h"
+
+const char i40e_driver_name[] = "i40e";
+static const char i40e_driver_string[] =
+			"Intel(R) Ethernet Connection XL710 Network Driver";
+
+#define DRV_KERN "-k"
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 9
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+	     __stringify(DRV_VERSION_MINOR) "." \
+	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
+const char i40e_driver_version_str[] = DRV_VERSION;
+static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+
+/* a bit of forward declarations */
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static int i40e_add_vsi(struct i40e_vsi *vsi);
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_misc_vector(struct i40e_pf *pf);
+static void i40e_determine_queue_usage(struct i40e_pf *pf);
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+
+/* i40e_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
+	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+	/* required last entry */
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
+
+#define I40E_MAX_VF_COUNT 128
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+			    u64 size, u32 alignment)
+{
+	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+	mem->size = ALIGN(size, alignment);
+	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
+				      &mem->pa, GFP_KERNEL);
+	if (mem->va)
+		return 0;
+
+	return -ENOMEM;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+	mem->va = NULL;
+	mem->pa = 0;
+	mem->size = 0;
+
+	return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+			     u32 size)
+{
+	mem->size = size;
+	mem->va = kzalloc(size, GFP_KERNEL);
+
+	if (mem->va)
+		return 0;
+
+	return -ENOMEM;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+	/* it's ok to kfree a NULL pointer */
+	kfree(mem->va);
+	mem->va = NULL;
+	mem->size = 0;
+
+	return 0;
+}
+
+/**
+ * i40e_get_lump - find a lump of free generic resource
+ * @pf: board private structure
+ * @pile: the pile of resource to search
+ * @needed: the number of items needed
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+ *
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same size lump requests.
+ * Linear search time and any fragmentation should be minimal.
+ **/
+static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+			 u16 needed, u16 id)
+{
+	int ret = -ENOMEM;
+	int i = 0;
+	int j = 0;
+
+	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
+		dev_info(&pf->pdev->dev,
+			 "param err: pile=%p needed=%d id=0x%04x\n",
+			 pile, needed, id);
+		return -EINVAL;
+	}
+
+	/* start the linear search with an imperfect hint */
+	i = pile->search_hint;
+	while (i < pile->num_entries && ret < 0) {
+		/* skip already allocated entries */
+		if (pile->list[i] & I40E_PILE_VALID_BIT) {
+			i++;
+			continue;
+		}
+
+		/* do we have enough in this lump? */
+		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
+			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
+				break;
+		}
+
+		if (j == needed) {
+			/* there was enough, so assign it to the requestor */
+			for (j = 0; j < needed; j++)
+				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+			ret = i;
+			pile->search_hint = i + j;
+		} else {
+			/* not enough, so skip over it and continue looking */
+			i += j;
+		}
+	}
+
+	return ret;
+}
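+
+/* Example walk (hypothetical pile contents): with the first two list
+ * entries already tagged I40E_PILE_VALID_BIT and needed == 3, the
+ * search skips to index 2, finds three free entries, stamps them with
+ * id | I40E_PILE_VALID_BIT, returns base index 2 and advances
+ * search_hint to 5.
+ */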
+
+/**
+ * i40e_put_lump - return a lump of generic resource
+ * @pile: the pile of resource to search
+ * @index: the base item index
+ * @id: the owner id of the items assigned
+ *
+ * Returns the count of items in the lump
+ **/
+static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+{
+	int valid_id = (id | I40E_PILE_VALID_BIT);
+	int count = 0;
+	int i;
+
+	if (!pile || index >= pile->num_entries)
+		return -EINVAL;
+
+	for (i = index;
+	     i < pile->num_entries && pile->list[i] == valid_id;
+	     i++) {
+		pile->list[i] = 0;
+		count++;
+	}
+
+	if (count && index < pile->search_hint)
+		pile->search_hint = index;
+
+	return count;
+}
+
+/**
+ * i40e_service_event_schedule - Schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue
+ **/
+static void i40e_service_event_schedule(struct i40e_pf *pf)
+{
+	if (!test_bit(__I40E_DOWN, &pf->state) &&
+	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
+	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+		schedule_work(&pf->service_task);
+}
+
+/**
+ * i40e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ *
+ * If any port has noticed a Tx timeout, it is likely that the whole
+ * device is munged, not just the one netdev port, so go for the full
+ * reset.
+ **/
+static void i40e_tx_timeout(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+
+	pf->tx_timeout_count++;
+
+	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
+		pf->tx_timeout_recovery_level = 0;
+	pf->tx_timeout_last_recovery = jiffies;
+	netdev_info(netdev, "tx_timeout recovery level %d\n",
+		    pf->tx_timeout_recovery_level);
+
+	switch (pf->tx_timeout_recovery_level) {
+	case 0:
+		/* disable and re-enable queues for the VSI */
+		if (in_interrupt()) {
+			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
+			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+		} else {
+			i40e_vsi_reinit_locked(vsi);
+		}
+		break;
+	case 1:
+		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		break;
+	case 2:
+		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+		break;
+	case 3:
+		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+		break;
+	default:
+		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+		i40e_down(vsi);
+		break;
+	}
+	i40e_service_event_schedule(pf);
+	pf->tx_timeout_recovery_level++;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new tail value to write to the ring
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+	rx_ring->next_to_use = val;
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_get_vsi_stats_struct - Get System Network Statistics
+ * @vsi: the VSI we care about
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+	return &vsi->net_stats;
+}
+
+/**
+ * i40e_get_netdev_stats_struct - Get statistics for netdev interface
+ * @netdev: network interface device structure
+ * @storage: stats structure to be filled in and returned
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+					     struct net_device *netdev,
+					     struct rtnl_link_stats64 *storage)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	*storage = *i40e_get_vsi_stats_struct(vsi);
+
+	return storage;
+}
+
+/**
+ * i40e_vsi_reset_stats - Resets all stats of the given vsi
+ * @vsi: the VSI to have its stats reset
+ **/
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
+{
+	struct rtnl_link_stats64 *ns;
+	int i;
+
+	if (!vsi)
+		return;
+
+	ns = i40e_get_vsi_stats_struct(vsi);
+	memset(ns, 0, sizeof(*ns));
+	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
+	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
+	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
+	if (vsi->rx_rings)
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			memset(&vsi->rx_rings[i].rx_stats, 0,
+			       sizeof(vsi->rx_rings[i].rx_stats));
+			memset(&vsi->tx_rings[i].tx_stats, 0,
+			       sizeof(vsi->tx_rings[i].tx_stats));
+		}
+	vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * @pf: the PF to be reset
+ **/
+void i40e_pf_reset_stats(struct i40e_pf *pf)
+{
+	memset(&pf->stats, 0, sizeof(pf->stats));
+	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
+	pf->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_stat_update48 - read and update a 48 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @hireg: the high 32 bit reg to read
+ * @loreg: the low 32 bit reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts.  We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.  In the process, we also manage
+ * the potential roll-over.
+ **/
+static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+			       bool offset_loaded, u64 *offset, u64 *stat)
+{
+	u64 new_data;
+
+	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+		new_data = rd32(hw, loreg);
+		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+	} else {
+		new_data = rd64(hw, loreg);
+	}
+	if (!offset_loaded)
+		*offset = new_data;
+	if (likely(new_data >= *offset))
+		*stat = new_data - *offset;
+	else
+		*stat = (new_data + ((u64)1 << 48)) - *offset;
+	*stat &= 0xFFFFFFFFFFFFULL;
+}
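+
+/* Rollover example: if *offset was captured as 0xFFFFFFFFFF00 and the
+ * 48-bit counter wraps to 0x20, new_data < *offset, so *stat becomes
+ * (0x20 + ((u64)1 << 48)) - 0xFFFFFFFFFF00 == 0x120 before the final
+ * 48-bit mask.
+ */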
+
+/**
+ * i40e_stat_update32 - read and update a 32 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
+			       bool offset_loaded, u64 *offset, u64 *stat)
+{
+	u32 new_data;
+
+	new_data = rd32(hw, reg);
+	if (!offset_loaded)
+		*offset = new_data;
+	if (likely(new_data >= *offset))
+		*stat = (u32)(new_data - *offset);
+	else
+		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+/**
+ * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
+ * @vsi: the VSI to be updated
+ **/
+void i40e_update_eth_stats(struct i40e_vsi *vsi)
+{
+	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_eth_stats *oes;
+	struct i40e_eth_stats *es;     /* device's eth stats */
+
+	es = &vsi->eth_stats;
+	oes = &vsi->eth_stats_offsets;
+
+	/* Gather up the stats that the hw collects */
+	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_errors, &es->tx_errors);
+	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_discards, &es->rx_discards);
+
+	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+			   I40E_GLV_GORCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_bytes, &es->rx_bytes);
+	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+			   I40E_GLV_UPRCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_unicast, &es->rx_unicast);
+	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+			   I40E_GLV_MPRCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_multicast, &es->rx_multicast);
+	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+			   I40E_GLV_BPRCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_broadcast, &es->rx_broadcast);
+
+	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+			   I40E_GLV_GOTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_bytes, &es->tx_bytes);
+	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+			   I40E_GLV_UPTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_unicast, &es->tx_unicast);
+	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+			   I40E_GLV_MPTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_multicast, &es->tx_multicast);
+	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+			   I40E_GLV_BPTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_broadcast, &es->tx_broadcast);
+	vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_veb_stats - Update Switch component statistics
+ * @veb: the VEB being updated
+ **/
+static void i40e_update_veb_stats(struct i40e_veb *veb)
+{
+	struct i40e_pf *pf = veb->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_eth_stats *oes;
+	struct i40e_eth_stats *es;     /* device's eth stats */
+	int idx = 0;
+
+	idx = veb->stats_idx;
+	es = &veb->stats;
+	oes = &veb->stats_offsets;
+
+	/* Gather up the stats that the hw collects */
+	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->tx_discards, &es->tx_discards);
+	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->rx_bytes, &es->rx_bytes);
+	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->rx_unicast, &es->rx_unicast);
+	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->rx_multicast, &es->rx_multicast);
+	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->rx_broadcast, &es->rx_broadcast);
+
+	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->tx_bytes, &es->tx_bytes);
+	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->tx_unicast, &es->tx_unicast);
+	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->tx_multicast, &es->tx_multicast);
+	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->tx_broadcast, &es->tx_broadcast);
+	veb->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
+ **/
+static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
+{
+	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+	struct i40e_hw_port_stats *nsd = &pf->stats;
+	struct i40e_hw *hw = &pf->hw;
+	u64 xoff = 0;
+	u16 i, v;
+
+	if ((hw->fc.current_mode != I40E_FC_FULL) &&
+	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
+		return;
+
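+	/* snapshot the running counter so we can tell below whether any
+	 * new XOFF frames arrived during this update
+	 */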
+	xoff = nsd->link_xoff_rx;
+	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
+
+	/* No new LFC xoff rx */
+	if (!(nsd->link_xoff_rx - xoff))
+		return;
+
+	/* New XOFF frames mean Tx may legitimately stall, so clear the
+	 * __I40E_HANG_CHECK_ARMED bit for all Tx rings rather than let
+	 * the hang check report a false Tx hang.
+	 */
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		struct i40e_vsi *vsi = pf->vsi[v];
+
+		if (!vsi)
+			continue;
+
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			struct i40e_ring *ring = &vsi->tx_rings[i];
+
+			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+		}
+	}
+}
+
+/**
+ * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in PFC mode
+ **/
+static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
+{
+	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+	struct i40e_hw_port_stats *nsd = &pf->stats;
+	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
+	struct i40e_dcbx_config *dcb_cfg;
+	struct i40e_hw *hw = &pf->hw;
+	u16 i, v;
+	u8 tc;
+
+	dcb_cfg = &hw->local_dcbx_config;
+
+	/* See if DCB enabled with PFC TC */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
+	    !(dcb_cfg->pfc.pfcenable)) {
+		i40e_update_link_xoff_rx(pf);
+		return;
+	}
+
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		u64 prio_xoff = nsd->priority_xoff_rx[i];
+
+		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+				   pf->stat_offsets_loaded,
+				   &osd->priority_xoff_rx[i],
+				   &nsd->priority_xoff_rx[i]);
+
+		/* No new PFC xoff rx */
+		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
+			continue;
+		/* Get the TC for given priority */
+		tc = dcb_cfg->etscfg.prioritytable[i];
+		xoff[tc] = true;
+	}
+
+	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		struct i40e_vsi *vsi = pf->vsi[v];
+
+		if (!vsi)
+			continue;
+
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			struct i40e_ring *ring = &vsi->tx_rings[i];
+
+			tc = ring->dcb_tc;
+			if (xoff[tc])
+				clear_bit(__I40E_HANG_CHECK_ARMED,
+					  &ring->state);
+		}
+	}
+}
+
+/**
+ * i40e_update_stats - Update the board statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * There are a few instances where we store the same stat in a
+ * couple of different structs.  This is partly because we have
+ * the netdev stats that need to be filled out, which is slightly
+ * different from the "eth_stats" defined by the chip and used in
+ * VF communications.  We sort it all out here in a central place.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct rtnl_link_stats64 *ons;
+	struct rtnl_link_stats64 *ns;   /* netdev stats */
+	struct i40e_eth_stats *oes;
+	struct i40e_eth_stats *es;     /* device's eth stats */
+	u32 tx_restart, tx_busy;
+	u32 rx_page, rx_buf;
+	u64 rx_p, rx_b;
+	u64 tx_p, tx_b;
+	int i;
+	u16 q;
+
+	if (test_bit(__I40E_DOWN, &vsi->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
+		return;
+
+	ns = i40e_get_vsi_stats_struct(vsi);
+	ons = &vsi->net_stats_offsets;
+	es = &vsi->eth_stats;
+	oes = &vsi->eth_stats_offsets;
+
+	/* Gather up the netdev and vsi stats that the driver collects
+	 * on the fly during packet processing
+	 */
+	rx_b = rx_p = 0;
+	tx_b = tx_p = 0;
+	tx_restart = tx_busy = 0;
+	rx_page = 0;
+	rx_buf = 0;
+	for (q = 0; q < vsi->num_queue_pairs; q++) {
+		struct i40e_ring *p;
+
+		p = &vsi->rx_rings[q];
+		rx_b += p->rx_stats.bytes;
+		rx_p += p->rx_stats.packets;
+		rx_buf += p->rx_stats.alloc_rx_buff_failed;
+		rx_page += p->rx_stats.alloc_rx_page_failed;
+
+		p = &vsi->tx_rings[q];
+		tx_b += p->tx_stats.bytes;
+		tx_p += p->tx_stats.packets;
+		tx_restart += p->tx_stats.restart_queue;
+		tx_busy += p->tx_stats.tx_busy;
+	}
+	vsi->tx_restart = tx_restart;
+	vsi->tx_busy = tx_busy;
+	vsi->rx_page_failed = rx_page;
+	vsi->rx_buf_failed = rx_buf;
+
+	ns->rx_packets = rx_p;
+	ns->rx_bytes = rx_b;
+	ns->tx_packets = tx_p;
+	ns->tx_bytes = tx_b;
+
+	i40e_update_eth_stats(vsi);
+	/* update netdev stats from eth stats */
+	ons->rx_errors = oes->rx_errors;
+	ns->rx_errors = es->rx_errors;
+	ons->tx_errors = oes->tx_errors;
+	ns->tx_errors = es->tx_errors;
+	ons->multicast = oes->rx_multicast;
+	ns->multicast = es->rx_multicast;
+	ons->tx_dropped = oes->tx_discards;
+	ns->tx_dropped = es->tx_discards;
+
+	/* Get the port data only if this is the main PF VSI */
+	if (vsi == pf->vsi[pf->lan_vsi]) {
+		struct i40e_hw_port_stats *nsd = &pf->stats;
+		struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+				   I40E_GLPRT_GORCL(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+				   I40E_GLPRT_GOTCL(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->eth.rx_discards,
+				   &nsd->eth.rx_discards);
+		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->eth.tx_discards,
+				   &nsd->eth.tx_discards);
+		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+				   I40E_GLPRT_MPRCL(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->eth.rx_multicast,
+				   &nsd->eth.rx_multicast);
+
+		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_dropped_link_down,
+				   &nsd->tx_dropped_link_down);
+
+		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->crc_errors, &nsd->crc_errors);
+		ns->rx_crc_errors = nsd->crc_errors;
+
+		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->illegal_bytes, &nsd->illegal_bytes);
+		ns->rx_errors = nsd->crc_errors
+				+ nsd->illegal_bytes;
+
+		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->mac_local_faults,
+				   &nsd->mac_local_faults);
+		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->mac_remote_faults,
+				   &nsd->mac_remote_faults);
+
+		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_length_errors,
+				   &nsd->rx_length_errors);
+		ns->rx_length_errors = nsd->rx_length_errors;
+
+		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->link_xon_rx, &nsd->link_xon_rx);
+		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->link_xon_tx, &nsd->link_xon_tx);
+		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
+		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+		for (i = 0; i < 8; i++) {
+			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+					   pf->stat_offsets_loaded,
+					   &osd->priority_xon_rx[i],
+					   &nsd->priority_xon_rx[i]);
+			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+					   pf->stat_offsets_loaded,
+					   &osd->priority_xon_tx[i],
+					   &nsd->priority_xon_tx[i]);
+			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+					   pf->stat_offsets_loaded,
+					   &osd->priority_xoff_tx[i],
+					   &nsd->priority_xoff_tx[i]);
+			i40e_stat_update32(hw,
+					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+					   pf->stat_offsets_loaded,
+					   &osd->priority_xon_2_xoff[i],
+					   &nsd->priority_xon_2_xoff[i]);
+		}
+
+		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+				   I40E_GLPRT_PRC64L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_64, &nsd->rx_size_64);
+		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+				   I40E_GLPRT_PRC127L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_127, &nsd->rx_size_127);
+		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+				   I40E_GLPRT_PRC255L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_255, &nsd->rx_size_255);
+		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+				   I40E_GLPRT_PRC511L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_511, &nsd->rx_size_511);
+		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+				   I40E_GLPRT_PRC1023L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_1023, &nsd->rx_size_1023);
+		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+				   I40E_GLPRT_PRC1522L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_1522, &nsd->rx_size_1522);
+		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+				   I40E_GLPRT_PRC9522L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_size_big, &nsd->rx_size_big);
+
+		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+				   I40E_GLPRT_PTC64L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_64, &nsd->tx_size_64);
+		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+				   I40E_GLPRT_PTC127L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_127, &nsd->tx_size_127);
+		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+				   I40E_GLPRT_PTC255L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_255, &nsd->tx_size_255);
+		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+				   I40E_GLPRT_PTC511L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_511, &nsd->tx_size_511);
+		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+				   I40E_GLPRT_PTC1023L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_1023, &nsd->tx_size_1023);
+		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+				   I40E_GLPRT_PTC1522L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_1522, &nsd->tx_size_1522);
+		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+				   I40E_GLPRT_PTC9522L(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->tx_size_big, &nsd->tx_size_big);
+
+		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_undersize, &nsd->rx_undersize);
+		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_fragments, &nsd->rx_fragments);
+		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_oversize, &nsd->rx_oversize);
+		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+				   pf->stat_offsets_loaded,
+				   &osd->rx_jabber, &nsd->rx_jabber);
+	}
+
+	pf->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+						u8 *macaddr, s16 vlan,
+						bool is_vf, bool is_netdev)
+{
+	struct i40e_mac_filter *f;
+
+	if (!vsi || !macaddr)
+		return NULL;
+
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if ((ether_addr_equal(macaddr, f->macaddr)) &&
+		    (vlan == f->vlan)    &&
+		    (!is_vf || f->is_vf) &&
+		    (!is_netdev || f->is_netdev))
+			return f;
+	}
+	return NULL;
+}
+
+/**
+ * i40e_find_mac - Find a mac addr in the macvlan filters list
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address we are searching for
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns the first filter with the provided MAC address or NULL if
+ * MAC address was not found
+ **/
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+				      bool is_vf, bool is_netdev)
+{
+	struct i40e_mac_filter *f;
+
+	if (!vsi || !macaddr)
+		return NULL;
+
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if ((ether_addr_equal(macaddr, f->macaddr)) &&
+		    (!is_vf || f->is_vf) &&
+		    (!is_netdev || f->is_netdev))
+			return f;
+	}
+	return NULL;
+}
+
+/**
+ * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
+ * @vsi: the VSI to be searched
+ *
+ * Returns true if VSI is in vlan mode or false otherwise
+ **/
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+
+	/* A vlan of -1 on every filter denotes "not in vlan mode", so we
+	 * have to walk the whole list to be sure
+	 */
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if (f->vlan >= 0)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ * @is_vf: true if it is a vf
+ * @is_netdev: true if it is a netdev
+ *
+ * Goes through all the macvlan filters and adds a
+ * macvlan filter for each unique vlan that already exists
+ *
+ * Returns first filter found on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+					     bool is_vf, bool is_netdev)
+{
+	struct i40e_mac_filter *f;
+
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if (!i40e_find_filter(vsi, macaddr, f->vlan,
+				      is_vf, is_netdev)) {
+			if (!i40e_add_filter(vsi, macaddr, f->vlan,
+						is_vf, is_netdev))
+				return NULL;
+		}
+	}
+
+	return list_first_entry_or_null(&vsi->mac_filter_list,
+					struct i40e_mac_filter, list);
+}
+
+/**
+ * i40e_add_filter - Add a mac/vlan filter to the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+					u8 *macaddr, s16 vlan,
+					bool is_vf, bool is_netdev)
+{
+	struct i40e_mac_filter *f;
+
+	if (!vsi || !macaddr)
+		return NULL;
+
+	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+			goto add_filter_out;
+
+		memcpy(f->macaddr, macaddr, ETH_ALEN);
+		f->vlan = vlan;
+		f->changed = true;
+
+		INIT_LIST_HEAD(&f->list);
+		list_add(&f->list, &vsi->mac_filter_list);
+	}
+
+	/* increment counter and add a new flag if needed */
+	if (is_vf) {
+		if (!f->is_vf) {
+			f->is_vf = true;
+			f->counter++;
+		}
+	} else if (is_netdev) {
+		if (!f->is_netdev) {
+			f->is_netdev = true;
+			f->counter++;
+		}
+	} else {
+		f->counter++;
+	}
+
+	/* changed tells sync_filters_subtask to
+	 * push the filter down to the firmware
+	 */
+	if (f->changed) {
+		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+	}
+
+add_filter_out:
+	return f;
+}
+
+/**
+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ **/
+void i40e_del_filter(struct i40e_vsi *vsi,
+		     u8 *macaddr, s16 vlan,
+		     bool is_vf, bool is_netdev)
+{
+	struct i40e_mac_filter *f;
+
+	if (!vsi || !macaddr)
+		return;
+
+	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+	if (!f || f->counter == 0)
+		return;
+
+	if (is_vf) {
+		if (f->is_vf) {
+			f->is_vf = false;
+			f->counter--;
+		}
+	} else if (is_netdev) {
+		if (f->is_netdev) {
+			f->is_netdev = false;
+			f->counter--;
+		}
+	} else {
+		/* make sure we don't remove a filter in use by vf or netdev */
+		int min_f = 0;
+		min_f += (f->is_vf ? 1 : 0);
+		min_f += (f->is_netdev ? 1 : 0);
+
+		if (f->counter > min_f)
+			f->counter--;
+	}
+
+	/* counter == 0 tells sync_filters_subtask to
+	 * remove the filter from the firmware's list
+	 */
+	if (f->counter == 0) {
+		f->changed = true;
+		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+	}
+}
+
+/**
+ * i40e_set_mac - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_mac(struct net_device *netdev, void *p)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct sockaddr *addr = p;
+	struct i40e_mac_filter *f;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
+
+	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+		return 0;
+
+	if (vsi->type == I40E_VSI_MAIN) {
+		i40e_status ret;
+		ret = i40e_aq_mac_address_write(&vsi->back->hw,
+						I40E_AQC_WRITE_TYPE_LAA_ONLY,
+						addr->sa_data, NULL);
+		if (ret) {
+			netdev_info(netdev,
+				    "Addr change for Main VSI failed: %d\n",
+				    ret);
+			return -EADDRNOTAVAIL;
+		}
+
+		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+	}
+
+	/* In order to be sure to not drop any packets, add the new address
+	 * then delete the old one.
+	 */
+	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
+	if (!f)
+		return -ENOMEM;
+
+	i40e_sync_vsi_filters(vsi);
+	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
+	i40e_sync_vsi_filters(vsi);
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+				     struct i40e_vsi_context *ctxt,
+				     u8 enabled_tc,
+				     bool is_add)
+{
+	struct i40e_pf *pf = vsi->back;
+	u16 sections = 0;
+	u8 netdev_tc = 0;
+	u16 numtc = 0;
+	u16 qcount;
+	u8 offset;
+	u16 qmap;
+	int i;
+
+	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+	offset = 0;
+
+	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+		/* Find numtc from enabled TC bitmap */
+		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+			if (enabled_tc & (1 << i)) /* TC is enabled */
+				numtc++;
+		}
+		if (!numtc) {
+			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+			numtc = 1;
+		}
+	} else {
+		/* At least TC0 is enabled in the non-DCB case */
+		numtc = 1;
+	}
+
+	vsi->tc_config.numtc = numtc;
+	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+
+	/* Setup queue offset/count for all TCs for given VSI */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		/* See if the given TC is enabled for the given VSI */
+		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+			int pow, num_qps;
+
+			vsi->tc_config.tc_info[i].qoffset = offset;
+			switch (vsi->type) {
+			case I40E_VSI_MAIN:
+				if (i == 0)
+					qcount = pf->rss_size;
+				else
+					qcount = pf->num_tc_qps;
+				vsi->tc_config.tc_info[i].qcount = qcount;
+				break;
+			case I40E_VSI_FDIR:
+			case I40E_VSI_SRIOV:
+			case I40E_VSI_VMDQ2:
+			default:
+				qcount = vsi->alloc_queue_pairs;
+				vsi->tc_config.tc_info[i].qcount = qcount;
+				WARN_ON(i != 0);
+				break;
+			}
+
+			/* find the rounded-up power-of-2 of the queue count */
+			num_qps = vsi->tc_config.tc_info[i].qcount;
+			pow = 0;
+			while (num_qps &&
+			      ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+				pow++;
+				num_qps >>= 1;
+			}
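+			/* e.g. offset 8 with qcount 4 yields pow 2, so the
+			 * qmap below encodes "4 queues starting at queue 8"
+			 * for this TC
+			 */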
+
+			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+			qmap =
+			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+			offset += vsi->tc_config.tc_info[i].qcount;
+		} else {
+			/* TC is not enabled so set the offset to
+			 * default queue and allocate one queue
+			 * for the given TC.
+			 */
+			vsi->tc_config.tc_info[i].qoffset = 0;
+			vsi->tc_config.tc_info[i].qcount = 1;
+			vsi->tc_config.tc_info[i].netdev_tc = 0;
+
+			qmap = 0;
+		}
+		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+	}
+
+	/* Set actual Tx/Rx queue pairs */
+	vsi->num_queue_pairs = offset;
+
+	/* Scheduler section valid can only be set for ADD VSI */
+	if (is_add) {
+		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+		ctxt->info.up_enable_bits = enabled_tc;
+	}
+	if (vsi->type == I40E_VSI_SRIOV) {
+		ctxt->info.mapping_flags |=
+				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+		for (i = 0; i < vsi->num_queue_pairs; i++)
+			ctxt->info.queue_mapping[i] =
+					       cpu_to_le16(vsi->base_queue + i);
+	} else {
+		ctxt->info.mapping_flags |=
+					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+	}
+	ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
+
+/**
+ * i40e_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40e_set_rx_mode(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_mac_filter *f, *ftmp;
+	struct i40e_vsi *vsi = np->vsi;
+	struct netdev_hw_addr *uca;
+	struct netdev_hw_addr *mca;
+	struct netdev_hw_addr *ha;
+
+	/* add addr if not already in the filter list */
+	netdev_for_each_uc_addr(uca, netdev) {
+		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
+			if (i40e_is_vsi_in_vlan(vsi))
+				i40e_put_mac_in_vlan(vsi, uca->addr,
+						     false, true);
+			else
+				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
+						false, true);
+		}
+	}
+
+	netdev_for_each_mc_addr(mca, netdev) {
+		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
+			if (i40e_is_vsi_in_vlan(vsi))
+				i40e_put_mac_in_vlan(vsi, mca->addr,
+						     false, true);
+			else
+				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
+						false, true);
+		}
+	}
+
+	/* remove filter if not in netdev list */
+	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+		bool found = false;
+
+		if (!f->is_netdev)
+			continue;
+
+		if (is_multicast_ether_addr(f->macaddr)) {
+			netdev_for_each_mc_addr(mca, netdev) {
+				if (ether_addr_equal(mca->addr, f->macaddr)) {
+					found = true;
+					break;
+				}
+			}
+		} else {
+			netdev_for_each_uc_addr(uca, netdev) {
+				if (ether_addr_equal(uca->addr, f->macaddr)) {
+					found = true;
+					break;
+				}
+			}
+
+			for_each_dev_addr(netdev, ha) {
+				if (ether_addr_equal(ha->addr, f->macaddr)) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (!found)
+			i40e_del_filter(
+			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+	}
+
+	/* check for other flag changes */
+	if (vsi->current_netdev_flags != vsi->netdev->flags) {
+		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+	}
+}
+
+/**
+ * i40e_sync_vsi_filters - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
+ *
+ * Returns 0 or error value
+ **/
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f, *ftmp;
+	bool promisc_forced_on = false;
+	bool add_happened = false;
+	int filter_list_len = 0;
+	u32 changed_flags = 0;
+	i40e_status ret = 0;
+	struct i40e_pf *pf;
+	int num_add = 0;
+	int num_del = 0;
+	u16 cmd_flags;
+
+	/* typed pointers for the add/delete lists, kcalloc'd below */
+	struct i40e_aqc_add_macvlan_element_data *add_list;
+	struct i40e_aqc_remove_macvlan_element_data *del_list;
+
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+		usleep_range(1000, 2000);
+	pf = vsi->back;
+
+	if (vsi->netdev) {
+		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+		vsi->current_netdev_flags = vsi->netdev->flags;
+	}
+
+	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
+		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
+
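+		/* the delete list is sized to what fits in one AdminQ
+		 * buffer; it is flushed and reused whenever it fills up
+		 */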
+		filter_list_len = pf->hw.aq.asq_buf_size /
+			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
+		del_list = kcalloc(filter_list_len,
+			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
+			    GFP_KERNEL);
+		if (!del_list) {
+			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+			return -ENOMEM;
+		}
+
+		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+			if (!f->changed)
+				continue;
+
+			if (f->counter != 0)
+				continue;
+			f->changed = false;
+			cmd_flags = 0;
+
+			/* add to delete list */
+			memcpy(del_list[num_del].mac_addr,
+			       f->macaddr, ETH_ALEN);
+			del_list[num_del].vlan_tag =
+				cpu_to_le16((u16)(f->vlan ==
+					    I40E_VLAN_ANY ? 0 : f->vlan));
+
+			/* vlan0 as wild card to allow packets from all vlans */
+			if (f->vlan == I40E_VLAN_ANY ||
+			    (vsi->netdev && !(vsi->netdev->features &
+					      NETIF_F_HW_VLAN_CTAG_FILTER)))
+				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+			del_list[num_del].flags = cmd_flags;
+			num_del++;
+
+			/* unlink from filter list */
+			list_del(&f->list);
+			kfree(f);
+
+			/* flush a full buffer */
+			if (num_del == filter_list_len) {
+				ret = i40e_aq_remove_macvlan(&pf->hw,
+					    vsi->seid, del_list, num_del,
+					    NULL);
+				num_del = 0;
+				memset(del_list, 0,
+				       filter_list_len * sizeof(*del_list));
+
+				if (ret)
+					dev_info(&pf->pdev->dev,
+						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
+						 ret,
+						 pf->hw.aq.asq_last_status);
+			}
+		}
+		if (num_del) {
+			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+						     del_list, num_del, NULL);
+			num_del = 0;
+
+			if (ret)
+				dev_info(&pf->pdev->dev,
+					 "ignoring delete macvlan error, err %d, aq_err %d\n",
+					 ret, pf->hw.aq.asq_last_status);
+		}
+
+		kfree(del_list);
+		del_list = NULL;
+
+		/* do all the adds now */
+		filter_list_len = pf->hw.aq.asq_buf_size /
+			       sizeof(struct i40e_aqc_add_macvlan_element_data);
+		add_list = kcalloc(filter_list_len,
+			       sizeof(struct i40e_aqc_add_macvlan_element_data),
+			       GFP_KERNEL);
+		if (!add_list) {
+			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+			return -ENOMEM;
+		}
+
+		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+			if (!f->changed)
+				continue;
+
+			if (f->counter == 0)
+				continue;
+			f->changed = false;
+			add_happened = true;
+			cmd_flags = 0;
+
+			/* add to add array */
+			memcpy(add_list[num_add].mac_addr,
+			       f->macaddr, ETH_ALEN);
+			add_list[num_add].vlan_tag =
+				cpu_to_le16(
+				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+			add_list[num_add].queue_number = 0;
+
+			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+
+			/* vlan0 as wild card to allow packets from all vlans */
+			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
+			    !(vsi->netdev->features &
+						 NETIF_F_HW_VLAN_CTAG_FILTER)))
+				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+			add_list[num_add].flags = cpu_to_le16(cmd_flags);
+			num_add++;
+
+			/* flush a full buffer */
+			if (num_add == filter_list_len) {
+				ret = i40e_aq_add_macvlan(&pf->hw,
+							  vsi->seid,
+							  add_list,
+							  num_add,
+							  NULL);
+				num_add = 0;
+
+				if (ret)
+					break;
+				memset(add_list, 0,
+				       filter_list_len * sizeof(*add_list));
+			}
+		}
+		if (num_add) {
+			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+						  add_list, num_add, NULL);
+			num_add = 0;
+		}
+		kfree(add_list);
+		add_list = NULL;
+
+		if (add_happened && (!ret)) {
+			/* do nothing */;
+		} else if (add_happened && (ret)) {
+			dev_info(&pf->pdev->dev,
+				 "add filter failed, err %d, aq_err %d\n",
+				 ret, pf->hw.aq.asq_last_status);
+			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
+			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+				      &vsi->state)) {
+				promisc_forced_on = true;
+				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+					&vsi->state);
+				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
+			}
+		}
+	}
+
+	/* check for changes in promiscuous modes */
+	if (changed_flags & IFF_ALLMULTI) {
+		bool cur_multipromisc;
+		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
+		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+							    vsi->seid,
+							    cur_multipromisc,
+							    NULL);
+		if (ret)
+			dev_info(&pf->pdev->dev,
+				 "set multi promisc failed, err %d, aq_err %d\n",
+				 ret, pf->hw.aq.asq_last_status);
+	}
+	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+		bool cur_promisc;
+		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
+			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+					&vsi->state));
+		ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+							  vsi->seid,
+							  cur_promisc,
+							  NULL);
+		if (ret)
+			dev_info(&pf->pdev->dev,
+				 "set uni promisc failed, err %d, aq_err %d\n",
+				 ret, pf->hw.aq.asq_last_status);
+	}
+
+	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+	return 0;
+}
+
+/**
+ * i40e_sync_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+{
+	int v;
+
+	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+		return;
+	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
+
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (pf->vsi[v] &&
+		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
+			i40e_sync_vsi_filters(pf->vsi[v]);
+	}
+}
+
+/**
+ * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	struct i40e_vsi *vsi = np->vsi;
+
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+		return -EINVAL;
+
+	netdev_info(netdev, "changing MTU from %d to %d\n",
+		    netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+	if (netif_running(netdev))
+		i40e_vsi_reinit_locked(vsi);
+
+	return 0;
+}
+
+/**
+ * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+{
+	struct i40e_vsi_context ctxt;
+	i40e_status ret;
+
+	if ((vsi->info.valid_sections &
+	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+		return;  /* already enabled */
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+
+	ctxt.seid = vsi->seid;
+	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "%s: update vsi failed, aq_err=%d\n",
+			 __func__, vsi->back->hw.aq.asq_last_status);
+	}
+}
+
+/**
+ * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+{
+	struct i40e_vsi_context ctxt;
+	i40e_status ret;
+
+	if ((vsi->info.valid_sections &
+	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
+		return;  /* already disabled */
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+	ctxt.seid = vsi->seid;
+	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "%s: update vsi failed, aq_err=%d\n",
+			 __func__, vsi->back->hw.aq.asq_last_status);
+	}
+}
+
+/**
+ * i40e_vlan_rx_register - Setup or shutdown vlan offload
+ * @netdev: network interface to be adjusted
+ * @features: netdev features to test if VLAN offload is enabled or not
+ **/
+static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		i40e_vlan_stripping_enable(vsi);
+	else
+		i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+	struct i40e_mac_filter *f, *add_f;
+	bool is_netdev, is_vf;
+	int ret;
+
+	is_vf = (vsi->type == I40E_VSI_SRIOV);
+	is_netdev = !!(vsi->netdev);
+
+	if (is_netdev) {
+		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
+					is_vf, is_netdev);
+		if (!add_f) {
+			dev_info(&vsi->back->pdev->dev,
+				 "Could not add vlan filter %d for %pM\n",
+				 vid, vsi->netdev->dev_addr);
+			return -ENOMEM;
+		}
+	}
+
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+		if (!add_f) {
+			dev_info(&vsi->back->pdev->dev,
+				 "Could not add vlan filter %d for %pM\n",
+				 vid, f->macaddr);
+			return -ENOMEM;
+		}
+	}
+
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Could not sync filters for vid %d\n", vid);
+		return ret;
+	}
+
+	/* Now that we have added a vlan tag, check whether it was the first
+	 * one (i.e. a "tag" of -1 still exists) and if so replace the -1
+	 * "tag" with 0, so that we accept untagged plus the specified tagged
+	 * traffic (and not any tagged and untagged)
+	 */
+	if (vid > 0) {
+		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
+						  I40E_VLAN_ANY,
+						  is_vf, is_netdev)) {
+			i40e_del_filter(vsi, vsi->netdev->dev_addr,
+					I40E_VLAN_ANY, is_vf, is_netdev);
+			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
+						is_vf, is_netdev);
+			if (!add_f) {
+				dev_info(&vsi->back->pdev->dev,
+					 "Could not add filter 0 for %pM\n",
+					 vsi->netdev->dev_addr);
+				return -ENOMEM;
+			}
+		}
+
+		list_for_each_entry(f, &vsi->mac_filter_list, list) {
+			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+					     is_vf, is_netdev)) {
+				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+						is_vf, is_netdev);
+				add_f = i40e_add_filter(vsi, f->macaddr,
+							0, is_vf, is_netdev);
+				if (!add_f) {
+					dev_info(&vsi->back->pdev->dev,
+						 "Could not add filter 0 for %pM\n",
+						 f->macaddr);
+					return -ENOMEM;
+				}
+			}
+		}
+		ret = i40e_sync_vsi_filters(vsi);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+	struct net_device *netdev = vsi->netdev;
+	struct i40e_mac_filter *f, *add_f;
+	bool is_vf, is_netdev;
+	int filter_count = 0;
+	int ret;
+
+	is_vf = (vsi->type == I40E_VSI_SRIOV);
+	is_netdev = !!(netdev);
+
+	if (is_netdev)
+		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
+
+	list_for_each_entry(f, &vsi->mac_filter_list, list)
+		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
+		return ret;
+	}
+
+	/* go through all the filters for this VSI and if the only remaining
+	 * vlan is vid == 0, there are no other filters left, so vid 0 must
+	 * be replaced with -1. This signifies that from now on we accept
+	 * any traffic (with any tag present, or untagged)
+	 */
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if (is_netdev) {
+			if (f->vlan &&
+			    ether_addr_equal(netdev->dev_addr, f->macaddr))
+				filter_count++;
+		}
+
+		if (f->vlan)
+			filter_count++;
+	}
+
+	if (!filter_count && is_netdev) {
+		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
+		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+				    is_vf, is_netdev);
+		if (!f) {
+			dev_info(&vsi->back->pdev->dev,
+				 "Could not add filter %d for %pM\n",
+				 I40E_VLAN_ANY, netdev->dev_addr);
+			return -ENOMEM;
+		}
+	}
+
+	if (!filter_count) {
+		list_for_each_entry(f, &vsi->mac_filter_list, list) {
+			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
+			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+					    is_vf, is_netdev);
+			if (!add_f) {
+				dev_info(&vsi->back->pdev->dev,
+					 "Could not add filter %d for %pM\n",
+					 I40E_VLAN_ANY, f->macaddr);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	return i40e_sync_vsi_filters(vsi);
+}
+
+/**
+ * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: vlan protocol, unused here
+ * @vid: vlan id to be added
+ **/
+static int i40e_vlan_rx_add_vid(struct net_device *netdev,
+				__always_unused __be16 proto, u16 vid)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	int ret;
+
+	if (vid > 4095)
+		return 0;
+
+	netdev_info(vsi->netdev, "adding %pM vid=%d\n",
+		    netdev->dev_addr, vid);
+	/* If the network stack called us with vid = 0, we should
+	 * indicate to i40e_vsi_add_vlan() that we want to receive
+	 * any traffic (i.e. with any vlan tag, or untagged)
+	 */
+	ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+
+	if (!ret) {
+		if (vid < VLAN_N_VID)
+			set_bit(vid, vsi->active_vlans);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: vlan protocol, unused here
+ * @vid: vlan id to be removed
+ **/
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+				 __always_unused __be16 proto, u16 vid)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	netdev_info(vsi->netdev, "removing %pM vid=%d\n",
+		    netdev->dev_addr, vid);
+	/* the return code is ignored, as there is nothing a user can do
+	 * about a failure to remove and a log message was already printed
+	 * in another function
+	 */
+	i40e_vsi_kill_vlan(vsi, vid);
+
+	clear_bit(vid, vsi->active_vlans);
+	return 0;
+}
+
+/**
+ * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
+ * @vsi: the vsi being brought back up
+ **/
+static void i40e_restore_vlan(struct i40e_vsi *vsi)
+{
+	u16 vid;
+
+	if (!vsi->netdev)
+		return;
+
+	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+
+	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
+		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
+				     vid);
+}
+
+/**
+ * i40e_vsi_add_pvid - Add pvid for the VSI
+ * @vsi: the vsi being adjusted
+ * @vid: the vlan id to set as a PVID
+ **/
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+{
+	struct i40e_vsi_context ctxt;
+	i40e_status ret;
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi->info.pvid = cpu_to_le16(vid);
+	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+
+	ctxt.seid = vsi->seid;
+	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "%s: update vsi failed, aq_err=%d\n",
+			 __func__, vsi->back->hw.aq.asq_last_status);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_vsi_remove_pvid - Remove the pvid from the VSI
+ * @vsi: the vsi being adjusted
+ *
+ * Just use the vlan_rx_register() service to put it back to normal
+ **/
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
+{
+	vsi->info.pvid = 0;
+	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+}
+
+/**
+ * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+{
+	int i, err = 0;
+
+	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+		err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+
+	return err;
+}
+
+/**
+ * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free VSI's transmit software resources
+ **/
+static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
+{
+	int i;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++)
+		if (vsi->tx_rings[i].desc)
+			i40e_free_tx_resources(&vsi->tx_rings[i]);
+}
+
+/**
+ * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+{
+	int i, err = 0;
+
+	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+		err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+	return err;
+}
+
+/**
+ * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free all receive software resources
+ **/
+static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
+{
+	int i;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++)
+		if (vsi->rx_rings[i].desc)
+			i40e_free_rx_resources(&vsi->rx_rings[i]);
+}
+
+/**
+ * i40e_configure_tx_ring - Configure a transmit ring context
+ * @ring: The Tx ring to configure
+ *
+ * Configure the Tx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_tx_ring(struct i40e_ring *ring)
+{
+	struct i40e_vsi *vsi = ring->vsi;
+	u16 pf_q = vsi->base_queue + ring->queue_index;
+	struct i40e_hw *hw = &vsi->back->hw;
+	struct i40e_hmc_obj_txq tx_ctx;
+	i40e_status err = 0;
+	u32 qtx_ctl = 0;
+
+	/* some ATR related tx ring init */
+	if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+		ring->atr_sample_rate = vsi->back->atr_sample_rate;
+		ring->atr_count = 0;
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	/* initialize XPS */
+	if (ring->q_vector && ring->netdev &&
+	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
+		netif_set_xps_queue(ring->netdev,
+				    &ring->q_vector->affinity_mask,
+				    ring->queue_index);
+
+	/* clear the context structure first */
+	memset(&tx_ctx, 0, sizeof(tx_ctx));
+
+	tx_ctx.new_context = 1;
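+	/* the queue context stores the ring base address in 128-byte
+	 * units, hence the divide by 128
+	 */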
+	tx_ctx.base = (ring->dma / 128);
+	tx_ctx.qlen = ring->count;
+	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
+			I40E_FLAG_FDIR_ATR_ENABLED));
+
+	/* As part of VSI creation/update, FW allocates certain
+	 * Tx arbitration queue sets for each TC enabled for
+	 * the VSI. The FW returns the handles to these queue
+	 * sets as part of the response buffer to Add VSI,
+	 * Update VSI, etc. AQ commands. It is expected that
+	 * these queue set handles be associated with the Tx
+	 * queues by the driver as part of the TX queue context
+	 * initialization. This has to be done regardless of
+	 * DCB as by default everything is mapped to TC0.
+	 */
+	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+	tx_ctx.rdylist_act = 0;
+
+	/* clear the context in the HMC */
+	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+	if (err) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
+			 ring->queue_index, pf_q, err);
+		return -ENOMEM;
+	}
+
+	/* set the context in the HMC */
+	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+	if (err) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
+			 ring->queue_index, pf_q, err);
+		return -ENOMEM;
+	}
+
+	/* Now associate this queue with this PCI function */
+	qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+						& I40E_QTX_CTL_PF_INDX_MASK);
+	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+	i40e_flush(hw);
+
+	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+
+	/* cache tail off for easier writes later */
+	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+	return 0;
+}
+
+/**
+ * i40e_configure_rx_ring - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_rx_ring(struct i40e_ring *ring)
+{
+	struct i40e_vsi *vsi = ring->vsi;
+	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
+	u16 pf_q = vsi->base_queue + ring->queue_index;
+	struct i40e_hw *hw = &vsi->back->hw;
+	struct i40e_hmc_obj_rxq rx_ctx;
+	i40e_status err = 0;
+
+	ring->state = 0;
+
+	/* clear the context structure first */
+	memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+	ring->rx_buf_len = vsi->rx_buf_len;
+	ring->rx_hdr_len = vsi->rx_hdr_len;
+
+	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
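+	/* as with Tx, the context stores the ring base in 128-byte units */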
+	rx_ctx.base = (ring->dma / 128);
+	rx_ctx.qlen = ring->count;
+
+	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
+		set_ring_16byte_desc_enabled(ring);
+		rx_ctx.dsize = 0;
+	} else {
+		rx_ctx.dsize = 1;
+	}
+
+	rx_ctx.dtype = vsi->dtype;
+	if (vsi->dtype) {
+		set_ring_ps_enabled(ring);
+		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
+				  I40E_RX_SPLIT_IP      |
+				  I40E_RX_SPLIT_TCP_UDP |
+				  I40E_RX_SPLIT_SCTP;
+	} else {
+		rx_ctx.hsplit_0 = 0;
+	}
+
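+	/* cap the receive frame size at what one buffer chain can hold */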
+	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
+				  (chain_len * ring->rx_buf_len));
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	rx_ctx.lrxqthresh = 2;
+	rx_ctx.crcstrip = 1;
+	rx_ctx.l2tsel = 1;
+	rx_ctx.showiv = 1;
+
+	/* clear the context in the HMC */
+	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+	if (err) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+			 ring->queue_index, pf_q, err);
+		return -ENOMEM;
+	}
+
+	/* set the context in the HMC */
+	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+	if (err) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+			 ring->queue_index, pf_q, err);
+		return -ENOMEM;
+	}
+
+	/* cache tail for quicker writes, and clear the reg before use */
+	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+	writel(0, ring->tail);
+
+	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_configure_tx - Configure the VSI for Tx
+ * @vsi: VSI structure describing this set of rings and resources
+ *
+ * Configure the Tx VSI for operation.
+ **/
+static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+{
+	int err = 0;
+	u16 i;
+
+	for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
+		err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+
+	return err;
+}
+
+/**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ **/
+static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+{
+	int err = 0;
+	u16 i;
+
+	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
+		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
+			       + ETH_FCS_LEN + VLAN_HLEN;
+	else
+		vsi->max_frame = I40E_RXBUFFER_2048;
+
+	/* figure out correct receive buffer length */
+	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
+				    I40E_FLAG_RX_PS_ENABLED)) {
+	case I40E_FLAG_RX_1BUF_ENABLED:
+		vsi->rx_hdr_len = 0;
+		vsi->rx_buf_len = vsi->max_frame;
+		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+		break;
+	case I40E_FLAG_RX_PS_ENABLED:
+		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
+		break;
+	default:
+		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
+		break;
+	}
+
+	/* round up to the 1 << HBUFF/DBUFF granularity the chip needs */
+	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
+				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
+				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+
+	/* set up individual rings */
+	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+		err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+
+	return err;
+}
+
+/**
+ * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
+{
+	u16 qoffset, qcount;
+	int i, n;
+
+	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+		return;
+
+	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
+		if (!(vsi->tc_config.enabled_tc & (1 << n)))
+			continue;
+
+		qoffset = vsi->tc_config.tc_info[n].qoffset;
+		qcount = vsi->tc_config.tc_info[n].qcount;
+		for (i = qoffset; i < (qoffset + qcount); i++) {
+			struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+			struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+
+			rx_ring->dcb_tc = n;
+			tx_ring->dcb_tc = n;
+		}
+	}
+}
+
+/**
+ * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
+{
+	if (vsi->netdev)
+		i40e_set_rx_mode(vsi->netdev);
+}
+
+/**
+ * i40e_vsi_configure - Set up the VSI for action
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_configure(struct i40e_vsi *vsi)
+{
+	int err;
+
+	i40e_set_vsi_rx_mode(vsi);
+	i40e_restore_vlan(vsi);
+	i40e_vsi_config_dcb_rings(vsi);
+	err = i40e_vsi_configure_tx(vsi);
+	if (!err)
+		err = i40e_vsi_configure_rx(vsi);
+
+	return err;
+}
+
+/**
+ * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_q_vector *q_vector;
+	struct i40e_hw *hw = &pf->hw;
+	u16 vector;
+	int i, q;
+	u32 val;
+	u32 qp;
+
+	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
+	 * and PFINT_LNKLSTn registers, e.g.:
+	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
+	 */
+	qp = vsi->base_queue;
+	vector = vsi->base_vector;
+	q_vector = vsi->q_vectors;
+	for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+		q_vector->rx.latency_range = I40E_LOW_LATENCY;
+		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+		     q_vector->rx.itr);
+		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+		q_vector->tx.latency_range = I40E_LOW_LATENCY;
+		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+		     q_vector->tx.itr);
+
+		/* Linked list for the queuepairs assigned to this vector */
+		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
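+		/* each RQCTL entry chains to its paired Tx queue and each
+		 * TQCTL entry chains to the next pair's Rx queue, so the
+		 * hardware walks one Rx/Tx list per vector
+		 */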
+		for (q = 0; q < q_vector->num_ringpairs; q++) {
+			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
+			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
+			      (I40E_QUEUE_TYPE_TX
+				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+			wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
+			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
+			      (I40E_QUEUE_TYPE_RX
+				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+			/* Terminate the linked list */
+			if (q == (q_vector->num_ringpairs - 1))
+				val |= (I40E_QUEUE_END_OF_LIST
+					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+			wr32(hw, I40E_QINT_TQCTL(qp), val);
+			qp++;
+		}
+	}
+
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_enable_misc_int_causes - enable the non-queue interrupts
+ * @hw: ptr to the hardware info
+ **/
+static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
+{
+	u32 val;
+
+	/* clear things first */
+	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
+	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
+
+	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
+	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
+	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
+	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
+	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
+	      I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK  |
+	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
+	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
+	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+	wr32(hw, I40E_PFINT_ICR0_ENA, val);
+
+	/* SW_ITR_IDX = 0, but don't change INTENA */
+	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+					I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+
+	/* OTHER_ITR_IDX = 0 */
+	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/**
+ * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+{
+	struct i40e_q_vector *q_vector = vsi->q_vectors;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 val;
+
+	/* set the ITR configuration */
+	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
+	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+
+	i40e_enable_misc_int_causes(hw);
+
+	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+	wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+	/* Associate the queue pair to the vector and enable the q int */
+	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		      |
+	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+	wr32(hw, I40E_QINT_RQCTL(0), val);
+
+	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
+	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+	wr32(hw, I40E_QINT_TQCTL(0), val);
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 val;
+
+	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
+	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+
+	wr32(hw, I40E_PFINT_DYN_CTL0, val);
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 val;
+
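+	/* queue vectors are offset by 1 here because vector 0 is used
+	 * for the non-queue ("other causes") interrupt
+	 */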
+	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
+{
+	struct i40e_q_vector *q_vector = data;
+
+	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+		return IRQ_HANDLED;
+
+	napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
+{
+	struct i40e_q_vector *q_vector = data;
+
+	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+		return IRQ_HANDLED;
+
+	pr_info("fdir ring cleaning needed\n");
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ *
+ * Allocates MSI-X vectors and requests interrupts from the kernel.
+ **/
+static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+{
+	int q_vectors = vsi->num_q_vectors;
+	struct i40e_pf *pf = vsi->back;
+	int base = vsi->base_vector;
+	int rx_int_idx = 0;
+	int tx_int_idx = 0;
+	int vector, err;
+
+	for (vector = 0; vector < q_vectors; vector++) {
+		struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+
+		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+			tx_int_idx++;
+		} else if (q_vector->rx.ring[0]) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-%s-%d", basename, "rx", rx_int_idx++);
+		} else if (q_vector->tx.ring[0]) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-%s-%d", basename, "tx", tx_int_idx++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(pf->msix_entries[base + vector].vector,
+				  vsi->irq_handler,
+				  0,
+				  q_vector->name,
+				  q_vector);
+		if (err) {
+			dev_info(&pf->pdev->dev,
+				 "%s: request_irq failed, error: %d\n",
+				 __func__, err);
+			goto free_queue_irqs;
+		}
+		/* assign the mask for this irq */
+		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+				      &q_vector->affinity_mask);
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+				      NULL);
+		free_irq(pf->msix_entries[base + vector].vector,
+			 &(vsi->q_vectors[vector]));
+	}
+	return err;
+}
+
+/**
+ * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ **/
+static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int base = vsi->base_vector;
+	int i;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
+		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+	}
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		for (i = vsi->base_vector;
+		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
+			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
+
+		i40e_flush(hw);
+		for (i = 0; i < vsi->num_q_vectors; i++)
+			synchronize_irq(pf->msix_entries[i + base].vector);
+	} else {
+		/* Legacy and MSI mode - this stops all interrupt handling */
+		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+		i40e_flush(hw);
+		synchronize_irq(pf->pdev->irq);
+	}
+}
+
+/**
+ * i40e_vsi_enable_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	int i;
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		for (i = vsi->base_vector;
+		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
+			i40e_irq_dynamic_enable(vsi, i);
+	} else {
+		i40e_irq_dynamic_enable_icr0(pf);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * @pf: board private structure
+ **/
+static void i40e_stop_misc_vector(struct i40e_pf *pf)
+{
+	/* Disable ICR 0 */
+	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
+	i40e_flush(&pf->hw);
+}
+
+/**
+ * i40e_intr - MSI/Legacy and non-queue interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ * This is the handler used for all MSI/Legacy interrupts, and deals
+ * with both queue and non-queue interrupts.  This is also used in
+ * MSIX mode to handle the non-queue interrupts.
+ **/
+static irqreturn_t i40e_intr(int irq, void *data)
+{
+	struct i40e_pf *pf = (struct i40e_pf *)data;
+	struct i40e_hw *hw = &pf->hw;
+	u32 icr0, icr0_remaining;
+	u32 val, ena_mask;
+
+	icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
+	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+		return IRQ_NONE;
+
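+	/* clear the PBA (pending bit array) so further causes can latch */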
+	val = rd32(hw, I40E_PFINT_DYN_CTL0);
+	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+	wr32(hw, I40E_PFINT_DYN_CTL0, val);
+
+	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
+	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+
+		/* temporarily disable queue cause for NAPI processing */
+		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+		wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+		qval = rd32(hw, I40E_QINT_TQCTL(0));
+		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+		wr32(hw, I40E_QINT_TQCTL(0), qval);
+		i40e_flush(hw);
+
+		if (!test_bit(__I40E_DOWN, &pf->state))
+			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+	}
+
+	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+	}
+
+	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+	}
+
+	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+	}
+
+	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+		val = rd32(hw, I40E_GLGEN_RSTAT);
+		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+		if (val & I40E_RESET_CORER)
+			pf->corer_count++;
+		else if (val & I40E_RESET_GLOBR)
+			pf->globr_count++;
+		else if (val & I40E_RESET_EMPR)
+			pf->empr_count++;
+	}
+
+	/* If a critical error is pending we have no choice but to reset the
+	 * device.
+	 * Report and mask out any remaining unexpected interrupts.
+	 */
+	icr0_remaining = icr0 & ena_mask;
+	if (icr0_remaining) {
+		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
+			 icr0_remaining);
+		if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
+		    (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
+		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
+		    (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+			if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+				dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+			} else {
+				dev_info(&pf->pdev->dev, "device will be reset\n");
+				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+				i40e_service_event_schedule(pf);
+			}
+		}
+		ena_mask &= ~icr0_remaining;
+	}
+
+	/* re-enable interrupt causes */
+	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
+	i40e_flush(hw);
+	if (!test_bit(__I40E_DOWN, &pf->state)) {
+		i40e_service_event_schedule(pf);
+		i40e_irq_dynamic_enable_icr0(pf);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * map_vector_to_rxq - Assigns the Rx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @r_idx: rx queue index
+ **/
+static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+{
+	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
+
+	rx_ring->q_vector = q_vector;
+	q_vector->rx.ring[q_vector->rx.count] = rx_ring;
+	q_vector->rx.count++;
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	q_vector->vsi = vsi;
+}
+
+/**
+ * map_vector_to_txq - Assigns the Tx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @t_idx: tx queue index
+ **/
+static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
+{
+	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+
+	tx_ring->q_vector = q_vector;
+	q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+	q_vector->tx.count++;
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+	q_vector->num_ringpairs++;
+	q_vector->vsi = vsi;
+}
+
+/**
+ * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per queue pair, but on a constrained vector budget, we
+ * group the queue pairs as "efficiently" as possible.
+ **/
+static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
+{
+	int qp_remaining = vsi->num_queue_pairs;
+	int q_vectors = vsi->num_q_vectors;
+	int qp_per_vector;
+	int v_start = 0;
+	int qp_idx = 0;
+
+	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+	 * group them so there are multiple queues per vector.
+	 */
+	for (; v_start < q_vectors && qp_remaining; v_start++) {
+		qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+		for (; qp_per_vector;
+		     qp_per_vector--, qp_idx++, qp_remaining--)	{
+			map_vector_to_rxq(vsi, v_start, qp_idx);
+			map_vector_to_txq(vsi, v_start, qp_idx);
+		}
+	}
+}
+
+/**
+ * i40e_vsi_request_irq - Request IRQ from the OS
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ **/
+static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
+{
+	struct i40e_pf *pf = vsi->back;
+	int err;
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		err = i40e_vsi_request_irq_msix(vsi, basename);
+	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+		err = request_irq(pf->pdev->irq, i40e_intr, 0,
+				  pf->misc_int_name, pf);
+	else
+		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
+				  pf->misc_int_name, pf);
+
+	if (err)
+		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40e_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts.  It's not called while the normal interrupt routine is executing.
+ **/
+static void i40e_netpoll(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__I40E_DOWN, &vsi->state))
+		return;
+
+	pf->flags |= I40E_FLAG_IN_NETPOLL;
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		for (i = 0; i < vsi->num_q_vectors; i++)
+			i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+	} else {
+		i40e_intr(pf->pdev->irq, netdev);
+	}
+	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
+}
+#endif
+
+/**
+ * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int i, j, pf_q;
+	u32 tx_reg;
+
+	pf_q = vsi->base_queue;
+	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
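+		/* wait until the QENA_REQ and QENA_STAT bits agree
+		 * before changing the queue state
+		 */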
+		j = 1000;
+		do {
+			usleep_range(1000, 2000);
+			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+		} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
+			       ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
+
+		if (enable) {
+			/* is STAT set ? */
+			if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+				dev_info(&pf->pdev->dev,
+					 "Tx %d already enabled\n", i);
+				continue;
+			}
+		} else {
+			/* is !STAT set ? */
+			if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+				dev_info(&pf->pdev->dev,
+					 "Tx %d already disabled\n", i);
+				continue;
+			}
+		}
+
+		/* turn on/off the queue */
+		if (enable)
+			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+				  I40E_QTX_ENA_QENA_STAT_MASK;
+		else
+			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+
+		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+
+		/* wait for the change to finish */
+		for (j = 0; j < 10; j++) {
+			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+			if (enable) {
+				if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+					break;
+			} else {
+				if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+					break;
+			}
+
+			udelay(10);
+		}
+		if (j >= 10) {
+			dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
+				 pf_q, (enable ? "en" : "dis"));
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int i, j, pf_q;
+	u32 rx_reg;
+
+	pf_q = vsi->base_queue;
+	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
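+		/* wait until the QENA_REQ and QENA_STAT bits agree
+		 * before changing the queue state
+		 */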
+		j = 1000;
+		do {
+			usleep_range(1000, 2000);
+			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+		} while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
+			       ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+
+		if (enable) {
+			/* is STAT set ? */
+			if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+				continue;
+		} else {
+			/* is !STAT set ? */
+			if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+				continue;
+		}
+
+		/* turn on/off the queue */
+		if (enable)
+			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+				  I40E_QRX_ENA_QENA_STAT_MASK;
+		else
+			rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
+				  I40E_QRX_ENA_QENA_STAT_MASK);
+		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+
+		/* wait for the change to finish */
+		for (j = 0; j < 10; j++) {
+			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+
+			if (enable) {
+				if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+					break;
+			} else {
+				if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+					break;
+			}
+
+			udelay(10);
+		}
+		if (j >= 10) {
+			dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
+				 pf_q, (enable ? "en" : "dis"));
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @request: true to start the rings, false to stop them
+ **/
+static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+{
+	int ret;
+
+	/* do rx first for enable and last for disable */
+	if (request) {
+		ret = i40e_vsi_control_rx(vsi, request);
+		if (ret)
+			return ret;
+		ret = i40e_vsi_control_tx(vsi, request);
+	} else {
+		ret = i40e_vsi_control_tx(vsi, request);
+		if (ret)
+			return ret;
+		ret = i40e_vsi_control_rx(vsi, request);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int base = vsi->base_vector;
+	u32 val, qp;
+	int i;
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		if (!vsi->q_vectors)
+			return;
+
+		for (i = 0; i < vsi->num_q_vectors; i++) {
+			u16 vector = i + base;
+
+			/* free only the irqs that were actually requested */
+			if (vsi->q_vectors[i].num_ringpairs == 0)
+				continue;
+
+			/* clear the affinity_mask in the IRQ descriptor */
+			irq_set_affinity_hint(pf->msix_entries[vector].vector,
+					      NULL);
+			free_irq(pf->msix_entries[vector].vector,
+				 &vsi->q_vectors[i]);
+
+			/* Tear down the interrupt queue link list
+			 *
+			 * We know that they come in pairs and always
+			 * the Rx first, then the Tx.  To clear the
+			 * link list, stick the EOL value into the
+			 * next_q field of the registers.
+			 */
+			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
+			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+			val |= I40E_QUEUE_END_OF_LIST
+				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
+
+			while (qp != I40E_QUEUE_END_OF_LIST) {
+				u32 next;
+
+				val = rd32(hw, I40E_QINT_RQCTL(qp));
+
+				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
+					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
+					 I40E_QINT_RQCTL_INTEVENT_MASK);
+
+				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+				wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+				val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
+					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+
+				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
+					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
+					 I40E_QINT_TQCTL_INTEVENT_MASK);
+
+				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+				wr32(hw, I40E_QINT_TQCTL(qp), val);
+				qp = next;
+			}
+		}
+	} else {
+		free_irq(pf->pdev->irq, pf);
+
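+		/* MSI/legacy mode has a single queue-cause list at LNKLST0 */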
+		val = rd32(hw, I40E_PFINT_LNKLST0);
+		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+		val |= I40E_QUEUE_END_OF_LIST
+			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+		wr32(hw, I40E_PFINT_LNKLST0, val);
+
+		val = rd32(hw, I40E_QINT_RQCTL(qp));
+		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
+			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
+			 I40E_QINT_RQCTL_INTEVENT_MASK);
+
+		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+		wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+		val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
+			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
+			 I40E_QINT_TQCTL_INTEVENT_MASK);
+
+		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+		wr32(hw, I40E_QINT_TQCTL(qp), val);
+	}
+}
+
+/**
+ * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI being un-configured
+ *
+ * This frees the memory allocated to the q_vectors and
+ * deletes references to the NAPI struct.
+ **/
+static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
+		struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
+		int r_idx;
+
+		if (!q_vector)
+			continue;
+
+		/* disassociate q_vector from rings */
+		for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
+			q_vector->tx.ring[r_idx]->q_vector = NULL;
+		for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
+			q_vector->rx.ring[r_idx]->q_vector = NULL;
+
+		/* only VSI w/ an associated netdev is set up w/ NAPI */
+		if (vsi->netdev)
+			netif_napi_del(&q_vector->napi);
+	}
+	kfree(vsi->q_vectors);
+}
+
+/**
+ * i40e_reset_interrupt_capability - Disable interrupt setup in OS
+ * @pf: board private structure
+ **/
+static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
+{
+	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		pci_disable_msix(pf->pdev);
+		kfree(pf->msix_entries);
+		pf->msix_entries = NULL;
+	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+		pci_disable_msi(pf->pdev);
+	}
+	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+}
+
+/**
+ * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @pf: board private structure
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
+{
+	int i;
+
+	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		if (pf->vsi[i])
+			i40e_vsi_free_q_vectors(pf->vsi[i]);
+	i40e_reset_interrupt_capability(pf);
+}
+
+/**
+ * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+{
+	int q_idx;
+
+	if (!vsi->netdev)
+		return;
+
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+		napi_enable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+{
+	int q_idx;
+
+	if (!vsi->netdev)
+		return;
+
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+		napi_disable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_quiesce_vsi - Pause a given VSI
+ * @vsi: the VSI being paused
+ **/
+static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+{
+	if (test_bit(__I40E_DOWN, &vsi->state))
+		return;
+
+	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+	if (vsi->netdev && netif_running(vsi->netdev)) {
+		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+	} else {
+		set_bit(__I40E_DOWN, &vsi->state);
+		i40e_down(vsi);
+	}
+}
+
+/**
+ * i40e_unquiesce_vsi - Resume a given VSI
+ * @vsi: the VSI being resumed
+ **/
+static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+{
+	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+		return;
+
+	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+	if (vsi->netdev && netif_running(vsi->netdev))
+		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+	else
+		i40e_up(vsi);   /* this clears the DOWN bit */
+}
+
+/**
+ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+{
+	int v;
+
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (pf->vsi[v])
+			i40e_quiesce_vsi(pf->vsi[v]);
+	}
+}
+
+/**
+ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+{
+	int v;
+
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (pf->vsi[v])
+			i40e_unquiesce_vsi(pf->vsi[v]);
+	}
+}
+
+/**
+ * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Return the number of TCs from given DCBx configuration
+ **/
+static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
+{
+	int num_tc = 0, i;
+
+	/* Scan the ETS Config Priority Table to find
+	 * traffic class enabled for a given priority
+	 * and use the traffic class index to get the
+	 * number of traffic classes enabled
+	 */
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
+			num_tc = dcbcfg->etscfg.prioritytable[i];
+	}
+
+	/* Traffic class index starts from zero so
+	 * increment to return the actual count
+	 */
+	num_tc++;
+
+	return num_tc;
+}
+
+/**
+ * i40e_dcb_get_enabled_tc - Get enabled traffic classes
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Query the current DCB configuration and return the number of
+ * traffic classes enabled from the given DCBX config
+ **/
+static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
+{
+	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
+	u8 enabled_tc = 1;
+	u8 i;
+
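+	/* bit 0 (TC0) is preset; set one bit for each TC reported */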
+	for (i = 0; i < num_tc; i++)
+		enabled_tc |= 1 << i;
+
+	return enabled_tc;
+}
+
+/**
+ * i40e_pf_get_num_tc - Get enabled traffic classes for PF
+ * @pf: PF being queried
+ *
+ * Return number of traffic classes enabled for the given PF
+ **/
+static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u8 i, enabled_tc;
+	u8 num_tc = 0;
+	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+	/* If DCB is not enabled then always in single TC */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+		return 1;
+
+	/* In MFP mode, return the count of TCs enabled for this PF */
+	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+		enabled_tc = pf->hw.func_caps.enabled_tcmap;
+		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+			if (enabled_tc & (1 << i))
+				num_tc++;
+		}
+		return num_tc;
+	}
+
+	/* In SFP mode, all TCs are enabled on the port */
+	return i40e_dcb_get_num_tc(dcbcfg);
+}
+
+/**
+ * i40e_pf_get_default_tc - Get bitmap for first enabled TC
+ * @pf: PF being queried
+ *
+ * Return a bitmap for first enabled traffic class for this PF.
+ **/
+static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
+{
+	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
+	u8 i = 0;
+
+	if (!enabled_tc)
+		return 0x1; /* TC0 */
+
+	/* Find the first enabled TC */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (enabled_tc & (1 << i))
+			break;
+	}
+
+	return 1 << i;
+}
+
+/**
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Return a bitmap for enabled traffic classes for this PF.
+ **/
+static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+{
+	/* If DCB is not enabled for this PF then just return default TC */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+		return i40e_pf_get_default_tc(pf);
+
+	/* MFP mode will have enabled TCs set by FW */
+	if (pf->flags & I40E_FLAG_MFP_ENABLED)
+		return pf->hw.func_caps.enabled_tcmap;
+
+	/* In SFP mode, the PF is enabled for all TCs */
+	return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+}
+
+/**
+ * i40e_vsi_get_bw_info - Query VSI BW Information
+ * @vsi: the VSI being queried
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+{
+	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
+	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 tc_bw_max;
+	int ret;
+	int i;
+
+	/* Get the VSI level BW configuration */
+	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+			 ret, pf->hw.aq.asq_last_status);
+		return ret;
+	}
+
+	/* Get the VSI level BW configuration per TC */
+	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
+					       &bw_ets_config,
+					       NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+			 ret, pf->hw.aq.asq_last_status);
+		return ret;
+	}
+
+	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
+		dev_info(&pf->pdev->dev,
+			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
+			 bw_config.tc_valid_bits,
+			 bw_ets_config.tc_valid_bits);
+		/* Still continuing */
+	}
+
+	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
+	vsi->bw_max_quanta = bw_config.max_bw;
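+	/* tc_bw_max arrives as two le16 words; combine them into one u32 */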
+	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
+		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
+		vsi->bw_ets_limit_credits[i] =
+					le16_to_cpu(bw_ets_config.credits[i]);
+		/* 3 bits out of 4 for each TC */
+		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
+	}
+	return ret;
+}
+
+/**
+ * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC bitmap
+ * @bw_share: BW shared credits per TC
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
+				       u8 enabled_tc,
+				       u8 *bw_share)
+{
+	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+	int i, ret = 0;
+
+	bw_data.tc_valid_bits = enabled_tc;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		bw_data.tc_bw_credits[i] = bw_share[i];
+
+	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
+				       &bw_data, NULL);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
+			 __func__, vsi->back->hw.aq.asq_last_status);
+		return ret;
+	}
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+	return ret;
+}
+
+/**
+ * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC map to be enabled
+ **/
+static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+	struct net_device *netdev = vsi->netdev;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u8 netdev_tc = 0;
+	int i;
+	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+	if (!netdev)
+		return;
+
+	if (!enabled_tc) {
+		netdev_reset_tc(netdev);
+		return;
+	}
+
+	/* Set up actual enabled TCs on the VSI */
+	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+		return;
+
+	/* set per TC queues for the VSI */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		/* Only set TC queues for enabled TCs
+		 *
+		 * e.g. For a VSI that has TC0 and TC3 enabled the
+		 * enabled_tc bitmap would be 0x09 (binary 1001); the
+		 * driver will set numtc for the netdev to 2, which the
+		 * netdev layer references as TC 0 and 1.
+		 */
+		if (vsi->tc_config.enabled_tc & (1 << i))
+			netdev_set_tc_queue(netdev,
+					vsi->tc_config.tc_info[i].netdev_tc,
+					vsi->tc_config.tc_info[i].qcount,
+					vsi->tc_config.tc_info[i].qoffset);
+	}
+
+	/* Assign UP2TC map for the VSI */
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		/* Get the actual TC# for the UP */
+		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+		/* Get the mapped netdev TC# for the UP */
+		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
+		netdev_set_prio_tc_map(netdev, i, netdev_tc);
+	}
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+				      struct i40e_vsi_context *ctxt)
+{
+	/* copy just the sections touched, not the entire info,
+	 * since not all sections are valid as returned by
+	 * update vsi params
+	 */
+	vsi->info.mapping_flags = ctxt->info.mapping_flags;
+	memcpy(&vsi->info.queue_mapping,
+	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+	       sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
+ **/
+static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+	struct i40e_vsi_context ctxt;
+	int ret = 0;
+	int i;
+
+	/* Check if enabled_tc is same as existing or new TCs */
+	if (vsi->tc_config.enabled_tc == enabled_tc)
+		return ret;
+
+	/* Enable ETS TCs with equal BW Share for now across all VSIs */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (enabled_tc & (1 << i))
+			bw_share[i] = 1;
+	}
+
+	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed configuring TC map %d for VSI %d\n",
+			 enabled_tc, vsi->seid);
+		goto out;
+	}
+
+	/* Update Queue Pairs Mapping for currently enabled UPs */
+	ctxt.seid = vsi->seid;
+	ctxt.pf_num = vsi->back->hw.pf_id;
+	ctxt.vf_num = 0;
+	ctxt.uplink_seid = vsi->uplink_seid;
+	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+
+	/* Update the VSI after updating the VSI queue-mapping information */
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "update vsi failed, aq_err=%d\n",
+			 vsi->back->hw.aq.asq_last_status);
+		goto out;
+	}
+	/* update the local VSI info with updated queue map */
+	i40e_vsi_update_queue_map(vsi, &ctxt);
+	vsi->info.valid_sections = 0;
+
+	/* Update current VSI BW information */
+	ret = i40e_vsi_get_bw_info(vsi);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed updating vsi bw info, aq_err=%d\n",
+			 vsi->back->hw.aq.asq_last_status);
+		goto out;
+	}
+
+	/* Update the netdev TC setup */
+	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
+out:
+	return ret;
+}
+
+/**
+ * i40e_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: the VSI being configured
+ **/
+static int i40e_up_complete(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	int err;
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		i40e_vsi_configure_msix(vsi);
+	else
+		i40e_configure_msi_and_legacy(vsi);
+
+	/* start rings */
+	err = i40e_vsi_control_rings(vsi, true);
+	if (err)
+		return err;
+
+	clear_bit(__I40E_DOWN, &vsi->state);
+	i40e_napi_enable_all(vsi);
+	i40e_vsi_enable_irq(vsi);
+
+	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
+	    (vsi->netdev)) {
+		netif_tx_start_all_queues(vsi->netdev);
+		netif_carrier_on(vsi->netdev);
+	}
+	i40e_service_event_schedule(pf);
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_reinit_locked - Reset the VSI
+ * @vsi: the VSI being configured
+ *
+ * Rebuild the ring structs after some configuration
+ * has changed, e.g. MTU size.
+ **/
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+
+	WARN_ON(in_interrupt());
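+	/* serialize this reinit against other configuration paths */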
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+		usleep_range(1000, 2000);
+	i40e_down(vsi);
+
+	/* Give a VF some time to respond to the reset.  The
+	 * two second wait is based upon the watchdog cycle in
+	 * the VF driver.
+	 */
+	if (vsi->type == I40E_VSI_SRIOV)
+		msleep(2000);
+	i40e_up(vsi);
+	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+	int err;
+
+	err = i40e_vsi_configure(vsi);
+	if (!err)
+		err = i40e_up_complete(vsi);
+
+	return err;
+}
+
+/**
+ * i40e_down - Shutdown the connection processing
+ * @vsi: the VSI being stopped
+ **/
+void i40e_down(struct i40e_vsi *vsi)
+{
+	int i;
+
+	/* It is assumed that the caller of this function
+	 * sets the vsi->state __I40E_DOWN bit.
+	 */
+	if (vsi->netdev) {
+		netif_carrier_off(vsi->netdev);
+		netif_tx_disable(vsi->netdev);
+	}
+	i40e_vsi_disable_irq(vsi);
+	i40e_vsi_control_rings(vsi, false);
+	i40e_napi_disable_all(vsi);
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		i40e_clean_tx_ring(&vsi->tx_rings[i]);
+		i40e_clean_rx_ring(&vsi->rx_rings[i]);
+	}
+}
+
+/**
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ **/
+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u8 enabled_tc = 0;
+	int ret = -EINVAL;
+	int i;
+
+	/* Check if DCB enabled to continue */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+		netdev_info(netdev, "DCB is not enabled for adapter\n");
+		goto exit;
+	}
+
+	/* Check if MFP enabled */
+	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
+		goto exit;
+	}
+
+	/* Check whether tc count is within enabled limit */
+	if (tc > i40e_pf_get_num_tc(pf)) {
+		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
+		goto exit;
+	}
+
+	/* Generate TC map for number of tc requested */
+	for (i = 0; i < tc; i++)
+		enabled_tc |= (1 << i);
+
+	/* Requesting same TC configuration as already enabled */
+	if (enabled_tc == vsi->tc_config.enabled_tc)
+		return 0;
+
+	/* Quiesce VSI queues */
+	i40e_quiesce_vsi(vsi);
+
+	/* Configure VSI for enabled TCs */
+	ret = i40e_vsi_config_tc(vsi, enabled_tc);
+	if (ret) {
+		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+			    vsi->seid);
+		goto exit;
+	}
+
+	/* Unquiesce VSI */
+	i40e_unquiesce_vsi(vsi);
+
+exit:
+	return ret;
+}
+
+/**
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_open(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	char int_name[IFNAMSIZ];
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__I40E_TESTING, &pf->state))
+		return -EBUSY;
+
+	netif_carrier_off(netdev);
+
+	/* allocate descriptors */
+	err = i40e_vsi_setup_tx_resources(vsi);
+	if (err)
+		goto err_setup_tx;
+	err = i40e_vsi_setup_rx_resources(vsi);
+	if (err)
+		goto err_setup_rx;
+
+	err = i40e_vsi_configure(vsi);
+	if (err)
+		goto err_setup_rx;
+
+	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+		 dev_driver_string(&pf->pdev->dev), netdev->name);
+	err = i40e_vsi_request_irq(vsi, int_name);
+	if (err)
+		goto err_setup_rx;
+
+	err = i40e_up_complete(vsi);
+	if (err)
+		goto err_up_complete;
+
+	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
+		err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
+		if (err)
+			netdev_info(netdev,
+				    "couldn't set broadcast err %d aq_err %d\n",
+				    err, pf->hw.aq.asq_last_status);
+	}
+
+	return 0;
+
+err_up_complete:
+	i40e_down(vsi);
+	i40e_vsi_free_irq(vsi);
+err_setup_rx:
+	i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+	i40e_vsi_free_tx_resources(vsi);
+	if (vsi == pf->vsi[pf->lan_vsi])
+		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+	return err;
+}
+
+/**
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
+ **/
+static int i40e_close(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (test_and_set_bit(__I40E_DOWN, &vsi->state))
+		return 0;
+
+	i40e_down(vsi);
+	i40e_vsi_free_irq(vsi);
+
+	i40e_vsi_free_tx_resources(vsi);
+	i40e_vsi_free_rx_resources(vsi);
+
+	return 0;
+}
+
+/**
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
+ **/
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+{
+	u32 val;
+
+	WARN_ON(in_interrupt());
+
+	/* do the biggest reset indicated */
+	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+
+		/* Request a Global Reset
+		 *
+		 * This will start the chip's countdown to the actual full
+		 * chip reset event, and a warning interrupt to be sent
+		 * to all PFs, including the requestor.  Our handler
+		 * for the warning interrupt will deal with the shutdown
+		 * and recovery of the switch setup.
+		 */
+		dev_info(&pf->pdev->dev, "GlobalR requested\n");
+		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+
+	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+
+		/* Request a Core Reset
+		 *
+		 * Same as Global Reset, except does *not* include the MAC/PHY
+		 */
+		dev_info(&pf->pdev->dev, "CoreR requested\n");
+		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+		val |= I40E_GLGEN_RTRIG_CORER_MASK;
+		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+		i40e_flush(&pf->hw);
+
+	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+
+		/* Request a PF Reset
+		 *
+		 * Resets only the PF-specific registers
+		 *
+		 * This goes directly to the tear-down and rebuild of
+		 * the switch, since we need to do all the recovery as
+		 * for the Core Reset.
+		 */
+		dev_info(&pf->pdev->dev, "PFR requested\n");
+		i40e_handle_reset_warning(pf);
+
+	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+		int v;
+
+		/* Find the VSI(s) that requested a re-init */
+		dev_info(&pf->pdev->dev,
+			 "VSI reinit requested\n");
+		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+			struct i40e_vsi *vsi = pf->vsi[v];
+			if (vsi != NULL &&
+			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+				i40e_vsi_reinit_locked(pf->vsi[v]);
+				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+			}
+		}
+
+		/* no further action needed, so return now */
+		return;
+	} else {
+		dev_info(&pf->pdev->dev,
+			 "bad reset request 0x%08x\n", reset_flags);
+		return;
+	}
+}
+
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+					   struct i40e_arq_event_info *e)
+{
+	struct i40e_aqc_lan_overflow *data =
+		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+	u32 queue = le32_to_cpu(data->prtdcb_rupto);
+	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	u16 vf_id;
+
+	dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
+		 __func__, queue, qtx_ctl);
+
+	/* Queue belongs to VF, find the VF and issue VF reset */
+	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
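+		/* convert the absolute VF id to an index relative to this PF */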
+		vf_id -= hw->func_caps.vf_base_id;
+		vf = &pf->vf[vf_id];
+		i40e_vc_notify_vf_reset(vf);
+		/* Allow VF to process pending reset notification */
+		msleep(20);
+		i40e_reset_vf(vf, false);
+	}
+}
+
+/**
+ * i40e_service_event_complete - Finish up the service event
+ * @pf: board private structure
+ **/
+static void i40e_service_event_complete(struct i40e_pf *pf)
+{
+	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_clear_bit();
+	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+}
+
+/**
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
+ * @pf: board private structure
+ **/
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
+{
+	if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
+		return;
+
+	pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
+
+	/* if interface is down do nothing */
+	if (test_bit(__I40E_DOWN, &pf->state))
+		return;
+}
+
+/**
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
+{
+	if (!vsi)
+		return;
+
+	switch (vsi->type) {
+	case I40E_VSI_MAIN:
+		if (!vsi->netdev || !vsi->netdev_registered)
+			break;
+
+		if (link_up) {
+			netif_carrier_on(vsi->netdev);
+			netif_tx_wake_all_queues(vsi->netdev);
+		} else {
+			netif_carrier_off(vsi->netdev);
+			netif_tx_stop_all_queues(vsi->netdev);
+		}
+		break;
+
+	case I40E_VSI_SRIOV:
+		break;
+
+	case I40E_VSI_VMDQ2:
+	case I40E_VSI_CTRL:
+	case I40E_VSI_MIRROR:
+	default:
+		/* there is no notification for other VSIs */
+		break;
+	}
+}
+
+/**
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+{
+	struct i40e_pf *pf;
+	int i;
+
+	if (!veb || !veb->pf)
+		return;
+	pf = veb->pf;
+
+	/* depth first... */
+	for (i = 0; i < I40E_MAX_VEB; i++)
+		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+			i40e_veb_link_event(pf->veb[i], link_up);
+
+	/* ... now the local VSIs */
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+			i40e_vsi_link_event(pf->vsi[i], link_up);
+}
+
+/**
+ * i40e_link_event - Update netif_carrier status
+ * @pf: board private structure
+ **/
+static void i40e_link_event(struct i40e_pf *pf)
+{
+	bool new_link, old_link;
+
+	new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
+	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+
+	if (new_link == old_link)
+		return;
+
+	netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+		    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+
+	/* Notify the base of the switch tree connected to
+	 * the link.  Floating VEBs are not notified.
+	 */
+	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+	else
+		i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+
+	if (pf->vf)
+		i40e_vc_notify_link_state(pf);
+}
+
+/**
+ * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
+ * @pf: board private structure
+ *
+ * Set the per-queue flags to request a check for stuck queues in the irq
+ * clean functions, then force interrupts to be sure the irq clean is called.
+ **/
+static void i40e_check_hang_subtask(struct i40e_pf *pf)
+{
+	int i, v;
+
+	/* If we're down or resetting, just bail */
+	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+		return;
+
+	/* for each VSI/netdev
+	 *     for each Tx queue
+	 *         set the check flag
+	 *     for each q_vector
+	 *         force an interrupt
+	 */
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		struct i40e_vsi *vsi = pf->vsi[v];
+		int armed = 0;
+
+		if (!pf->vsi[v] ||
+		    test_bit(__I40E_DOWN, &vsi->state) ||
+		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
+			continue;
+
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			set_check_for_tx_hang(&vsi->tx_rings[i]);
+			if (test_bit(__I40E_HANG_CHECK_ARMED,
+				     &vsi->tx_rings[i].state))
+				armed++;
+		}
+
+		if (armed) {
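+			/* fire a software interrupt so the irq clean runs */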
+			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
+				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+			} else {
+				u16 vec = vsi->base_vector - 1;
+				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
+					wr32(&vsi->back->hw,
+					     I40E_PFINT_DYN_CTLN(vec), val);
+			}
+			i40e_flush(&vsi->back->hw);
+		}
+	}
+}
+
+/**
+ * i40e_watchdog_subtask - Update the stats for active netdevs and VEBs
+ * @pf: board private structure
+ **/
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
+{
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__I40E_DOWN, &pf->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
+		return;
+
+	/* Update the stats for active netdevs so the network stack
+	 * can look at updated numbers whenever it cares to
+	 */
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		if (pf->vsi[i] && pf->vsi[i]->netdev)
+			i40e_update_stats(pf->vsi[i]);
+
+	/* Update the stats for the active switching components */
+	for (i = 0; i < I40E_MAX_VEB; i++)
+		if (pf->veb[i])
+			i40e_update_veb_stats(pf->veb[i]);
+}
+
+/**
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ **/
+static void i40e_reset_subtask(struct i40e_pf *pf)
+{
+	u32 reset_flags = 0;
+
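+	/* collect all outstanding reset requests and clear their state bits */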
+	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+	}
+	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+	}
+	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+	}
+	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+	}
+
+	/* If a recovery is already waiting, it takes precedence
+	 * over starting a new reset sequence.
+	 */
+	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
+		i40e_handle_reset_warning(pf);
+		return;
+	}
+
+	/* If we're already down or resetting, just bail */
+	if (reset_flags &&
+	    !test_bit(__I40E_DOWN, &pf->state) &&
+	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
+		i40e_do_reset(pf, reset_flags);
+}
+
+/**
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static void i40e_handle_link_event(struct i40e_pf *pf,
+				   struct i40e_arq_event_info *e)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_aqc_get_link_status *status =
+		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+	/* save off old link status information */
+	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
+	       sizeof(pf->hw.phy.link_info_old));
+
+	/* update link status */
+	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
+	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
+	hw_link_info->link_info = status->link_info;
+	hw_link_info->an_info = status->an_info;
+	hw_link_info->ext_info = status->ext_info;
+	hw_link_info->lse_enable =
+		le16_to_cpu(status->command_flags) &
+			    I40E_AQ_LSE_ENABLE;
+
+	/* process the event */
+	i40e_link_event(pf);
+
+	/* Do a new status request to re-enable LSE reporting
+	 * and load new status information into the hw struct,
+	 * then see if the status changed while processing the
+	 * initial event.
+	 */
+	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+	i40e_link_event(pf);
+}
+
+/**
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
+ * @pf: board private structure
+ **/
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+{
+	struct i40e_arq_event_info event;
+	struct i40e_hw *hw = &pf->hw;
+	u16 pending, i = 0;
+	i40e_status ret;
+	u16 opcode;
+	u32 val;
+
+	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
+		return;
+
+	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+	if (!event.msg_buf)
+		return;
+
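+	/* drain the ARQ, bounded by the per-PF work limit */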
+	do {
+		ret = i40e_clean_arq_element(hw, &event, &pending);
+		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+			dev_info(&pf->pdev->dev, "No ARQ event found\n");
+			break;
+		} else if (ret) {
+			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+			break;
+		}
+
+		opcode = le16_to_cpu(event.desc.opcode);
+		switch (opcode) {
+
+		case i40e_aqc_opc_get_link_status:
+			i40e_handle_link_event(pf, &event);
+			break;
+		case i40e_aqc_opc_send_msg_to_pf:
+			ret = i40e_vc_process_vf_msg(pf,
+					le16_to_cpu(event.desc.retval),
+					le32_to_cpu(event.desc.cookie_high),
+					le32_to_cpu(event.desc.cookie_low),
+					event.msg_buf,
+					event.msg_size);
+			break;
+		case i40e_aqc_opc_lldp_update_mib:
+			dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+			break;
+		case i40e_aqc_opc_event_lan_overflow:
+			dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+			i40e_handle_lan_overflow_event(pf, &event);
+			break;
+		default:
+			dev_info(&pf->pdev->dev,
+				 "ARQ Error: Unknown event %d received\n",
+				 event.desc.opcode);
+			break;
+		}
+	} while (pending && (i++ < pf->adminq_work_limit));
+
+	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+	/* re-enable Admin queue interrupt cause */
+	val = rd32(hw, I40E_PFINT_ICR0_ENA);
+	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, val);
+	i40e_flush(hw);
+
+	kfree(event.msg_buf);
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses in to build the next layer of VEB.  We track the connections
+ * through our own index numbers because the seid's from the HW could
+ * change across the reset.
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+	struct i40e_vsi *ctl_vsi = NULL;
+	struct i40e_pf *pf = veb->pf;
+	int v, veb_idx;
+	int ret;
+
+	/* build VSI that owns this VEB, temporarily attached to base VEB */
+	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+		if (pf->vsi[v] &&
+		    pf->vsi[v]->veb_idx == veb->idx &&
+		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+			ctl_vsi = pf->vsi[v];
+			break;
+		}
+	}
+	if (!ctl_vsi) {
+		dev_info(&pf->pdev->dev,
+			 "missing owner VSI for veb_idx %d\n", veb->idx);
+		ret = -ENOENT;
+		goto end_reconstitute;
+	}
+	if (ctl_vsi != pf->vsi[pf->lan_vsi])
+		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+	ret = i40e_add_vsi(ctl_vsi);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "rebuild of owner VSI failed: %d\n", ret);
+		goto end_reconstitute;
+	}
+	i40e_vsi_reset_stats(ctl_vsi);
+
+	/* create the VEB in the switch and move the VSI onto the VEB */
+	ret = i40e_add_veb(veb, ctl_vsi);
+	if (ret)
+		goto end_reconstitute;
+
+	/* create the remaining VSIs attached to this VEB */
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+			continue;
+
+		if (pf->vsi[v]->veb_idx == veb->idx) {
+			struct i40e_vsi *vsi = pf->vsi[v];
+			vsi->uplink_seid = veb->seid;
+			ret = i40e_add_vsi(vsi);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "rebuild of vsi_idx %d failed: %d\n",
+					 v, ret);
+				goto end_reconstitute;
+			}
+			i40e_vsi_reset_stats(vsi);
+		}
+	}
+
+	/* create any VEBs attached to this VEB - RECURSION */
+	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+			pf->veb[veb_idx]->uplink_seid = veb->seid;
+			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+			if (ret)
+				break;
+		}
+	}
+
+end_reconstitute:
+	return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf)
+{
+	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+	u16 data_size;
+	int buf_len;
+	int err;
+
+	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+	do {
+		cap_buf = kzalloc(buf_len, GFP_KERNEL);
+		if (!cap_buf)
+			return -ENOMEM;
+
+		/* this loads the data into the hw struct for us */
+		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+					    &data_size,
+					    i40e_aqc_opc_list_func_capabilities,
+					    NULL);
+		/* data loaded, buffer no longer needed */
+		kfree(cap_buf);
+
+		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+			/* retry with a larger buffer */
+			buf_len = data_size;
+		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+			dev_info(&pf->pdev->dev,
+				 "capability discovery failed: aq=%d\n",
+				 pf->hw.aq.asq_last_status);
+			return -ENODEV;
+		}
+	} while (err);
+
+	if (pf->hw.debug_mask & I40E_DEBUG_USER)
+		dev_info(&pf->pdev->dev,
+			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+			 pf->hw.func_caps.num_msix_vectors,
+			 pf->hw.func_caps.num_msix_vectors_vf,
+			 pf->hw.func_caps.fd_filters_guaranteed,
+			 pf->hw.func_caps.fd_filters_best_effort,
+			 pf->hw.func_caps.num_tx_qp,
+			 pf->hw.func_caps.num_vsis);
+
+	return 0;
+}
+
+/**
+ * i40e_fdir_setup - initialize the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_setup(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi;
+	bool new_vsi = false;
+	int err, i;
+
+	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+		return;
+
+	pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+
+	/* find existing or make new FDIR VSI */
+	vsi = NULL;
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+			vsi = pf->vsi[i];
+	if (!vsi) {
+		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+			pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
+			return;
+		}
+		new_vsi = true;
+	}
+	WARN_ON(vsi->base_queue != I40E_FDIR_RING);
+	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+
+	err = i40e_vsi_setup_tx_resources(vsi);
+	if (!err)
+		err = i40e_vsi_setup_rx_resources(vsi);
+	if (!err)
+		err = i40e_vsi_configure(vsi);
+	if (!err && new_vsi) {
+		char int_name[IFNAMSIZ + 9];
+		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+			 dev_driver_string(&pf->pdev->dev));
+		err = i40e_vsi_request_irq(vsi, int_name);
+	}
+	if (!err)
+		err = i40e_up_complete(vsi);
+
+	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+	int i;
+
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+			i40e_vsi_release(pf->vsi[i]);
+			break;
+		}
+	}
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+	struct i40e_driver_version dv;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret;
+	u32 v;
+
+	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+		return;
+
+	dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+	i40e_vc_notify_reset(pf);
+
+	/* quiesce the VSIs and their queues that are not already DOWN */
+	i40e_pf_quiesce_all_vsi(pf);
+
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (pf->vsi[v])
+			pf->vsi[v]->seid = 0;
+	}
+
+	i40e_shutdown_adminq(&pf->hw);
+
+	/* Now we wait for GRST to settle out.
+	 * We don't have to delete the VEBs or VSIs from the hw switch
+	 * because the reset will make them disappear.
+	 */
+	ret = i40e_pf_reset(hw);
+	if (ret)
+		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+	pf->pfr_count++;
+
+	if (test_bit(__I40E_DOWN, &pf->state))
+		goto end_core_reset;
+	dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+	ret = i40e_init_adminq(&pf->hw);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+		goto end_core_reset;
+	}
+
+	ret = i40e_get_capabilities(pf);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
+			 ret);
+		goto end_core_reset;
+	}
+
+	/* call shutdown HMC */
+	ret = i40e_shutdown_lan_hmc(hw);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+		goto end_core_reset;
+	}
+
+	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+				hw->func_caps.num_rx_qp,
+				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+		goto end_core_reset;
+	}
+	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+		goto end_core_reset;
+	}
+
+	/* do basic switch setup */
+	ret = i40e_setup_pf_switch(pf);
+	if (ret)
+		goto end_core_reset;
+
+	/* Rebuild the VSIs and VEBs that existed before reset.
+	 * They are still in our local switch element arrays, so only
+	 * need to rebuild the switch model in the HW.
+	 *
+	 * If there were VEBs but the reconstitution failed, we'll try
+	 * to recover minimal use by getting the basic PF VSI working.
+	 */
+	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
+		dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+		/* find the one VEB connected to the MAC, and find orphans */
+		for (v = 0; v < I40E_MAX_VEB; v++) {
+			if (!pf->veb[v])
+				continue;
+
+			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+			    pf->veb[v]->uplink_seid == 0) {
+				ret = i40e_reconstitute_veb(pf->veb[v]);
+
+				if (!ret)
+					continue;
+
+				/* If Main VEB failed, we're in deep doodoo,
+				 * so give up rebuilding the switch and set up
+				 * for minimal rebuild of PF VSI.
+				 * If orphan failed, we'll report the error
+				 * but try to keep going.
+				 */
+				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+					dev_info(&pf->pdev->dev,
+						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+						 ret);
+					pf->vsi[pf->lan_vsi]->uplink_seid
+								= pf->mac_seid;
+					break;
+				} else if (pf->veb[v]->uplink_seid == 0) {
+					dev_info(&pf->pdev->dev,
+						 "rebuild of orphan VEB failed: %d\n",
+						 ret);
+				}
+			}
+		}
+	}
+
+	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
+		dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+		/* no VEB, so rebuild only the Main VSI */
+		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "rebuild of Main VSI failed: %d\n", ret);
+			goto end_core_reset;
+		}
+	}
+
+	/* reinit the misc interrupt */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		ret = i40e_setup_misc_vector(pf);
+
+	/* restart the VSIs that were rebuilt and running before the reset */
+	i40e_pf_unquiesce_all_vsi(pf);
+
+	/* tell the firmware that we're starting */
+	dv.major_version = DRV_VERSION_MAJOR;
+	dv.minor_version = DRV_VERSION_MINOR;
+	dv.build_version = DRV_VERSION_BUILD;
+	dv.subbuild_version = 0;
+	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+	dev_info(&pf->pdev->dev, "PF reset done\n");
+
+end_core_reset:
+	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+}
+
+/**
+ * i40e_handle_mdd_event
+ * @pf: pointer to the pf structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious vfs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	bool mdd_detected = false;
+	struct i40e_vf *vf;
+	u32 reg;
+	int i;
+
+	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+		return;
+
+	/* find what triggered the MDD event */
+	reg = rd32(hw, I40E_GL_MDET_TX);
+	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+		u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
+				>> I40E_GL_MDET_TX_FUNCTION_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
+				>> I40E_GL_MDET_TX_EVENT_SHIFT;
+		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
+				>> I40E_GL_MDET_TX_QUEUE_SHIFT;
+		dev_info(&pf->pdev->dev,
+			 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+			 event, queue, func);
+		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+		mdd_detected = true;
+	}
+	reg = rd32(hw, I40E_GL_MDET_RX);
+	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
+				>> I40E_GL_MDET_RX_FUNCTION_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
+				>> I40E_GL_MDET_RX_EVENT_SHIFT;
+		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
+				>> I40E_GL_MDET_RX_QUEUE_SHIFT;
+		dev_info(&pf->pdev->dev,
+			 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+			 event, queue, func);
+		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+		mdd_detected = true;
+	}
+
+	/* see if one of the VFs needs its hand slapped */
+	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+		vf = &(pf->vf[i]);
+		reg = rd32(hw, I40E_VP_MDET_TX(i));
+		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+			vf->num_mdd_events++;
+			dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+		}
+
+		reg = rd32(hw, I40E_VP_MDET_RX(i));
+		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+			vf->num_mdd_events++;
+			dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+		}
+
+		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
+			dev_info(&pf->pdev->dev,
+				 "Too many MDD events on VF %d, disabled\n", i);
+			dev_info(&pf->pdev->dev,
+				 "Use PF Control I/F to re-enable the VF\n");
+			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+		}
+	}
+
+	/* re-enable mdd interrupt cause */
+	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+	struct i40e_pf *pf = container_of(work,
+					  struct i40e_pf,
+					  service_task);
+	unsigned long start_time = jiffies;
+
+	i40e_reset_subtask(pf);
+	i40e_handle_mdd_event(pf);
+	i40e_vc_process_vflr_event(pf);
+	i40e_watchdog_subtask(pf);
+	i40e_fdir_reinit_subtask(pf);
+	i40e_check_hang_subtask(pf);
+	i40e_sync_filters_subtask(pf);
+	i40e_clean_adminq_subtask(pf);
+
+	i40e_service_event_complete(pf);
+
+	/* If the tasks have taken longer than one timer cycle or there
+	 * is more work to be done, reschedule the service task now
+	 * rather than wait for the timer to tick again.
+	 */
+	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)		 ||
+	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)		 ||
+	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+		i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @data: pointer to PF struct
+ **/
+static void i40e_service_timer(unsigned long data)
+{
+	struct i40e_pf *pf = (struct i40e_pf *)data;
+
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));
+	i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+
+	switch (vsi->type) {
+	case I40E_VSI_MAIN:
+		vsi->alloc_queue_pairs = pf->num_lan_qps;
+		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+				      I40E_REQ_DESCRIPTOR_MULTIPLE);
+		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+			vsi->num_q_vectors = pf->num_lan_msix;
+		else
+			vsi->num_q_vectors = 1;
+
+		break;
+
+	case I40E_VSI_FDIR:
+		vsi->alloc_queue_pairs = 1;
+		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
+				      I40E_REQ_DESCRIPTOR_MULTIPLE);
+		vsi->num_q_vectors = 1;
+		break;
+
+	case I40E_VSI_VMDQ2:
+		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+				      I40E_REQ_DESCRIPTOR_MULTIPLE);
+		vsi->num_q_vectors = pf->num_vmdq_msix;
+		break;
+
+	case I40E_VSI_SRIOV:
+		vsi->alloc_queue_pairs = pf->num_vf_qps;
+		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+				      I40E_REQ_DESCRIPTOR_MULTIPLE);
+		break;
+
+	default:
+		WARN_ON(1);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns vsi index in PF (positive)
+ **/
+static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+	int ret = -ENODEV;
+	struct i40e_vsi *vsi;
+	int vsi_idx;
+	int i;
+
+	/* Need to protect the allocation of the VSIs at the PF level */
+	mutex_lock(&pf->switch_mutex);
+
+	/* VSI list may be fragmented if VSI creation/destruction has
+	 * been happening.  We can afford to do a quick scan to look
+	 * for any free VSIs in the list.
+	 *
+	 * find next empty vsi slot, looping back around if necessary
+	 */
+	i = pf->next_vsi;
+	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+		i++;
+	if (i >= pf->hw.func_caps.num_vsis) {
+		i = 0;
+		while (i < pf->next_vsi && pf->vsi[i])
+			i++;
+	}
+
+	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+		vsi_idx = i;             /* Found one! */
+	} else {
+		ret = -ENODEV;
+		goto err_alloc_vsi;  /* out of VSI slots! */
+	}
+	pf->next_vsi = ++i;
+
+	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+	if (!vsi) {
+		ret = -ENOMEM;
+		goto err_alloc_vsi;
+	}
+	vsi->type = type;
+	vsi->back = pf;
+	set_bit(__I40E_DOWN, &vsi->state);
+	vsi->flags = 0;
+	vsi->idx = vsi_idx;
+	vsi->rx_itr_setting = pf->rx_itr_default;
+	vsi->tx_itr_setting = pf->tx_itr_default;
+	vsi->netdev_registered = false;
+	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+	INIT_LIST_HEAD(&vsi->mac_filter_list);
+
+	i40e_set_num_rings_in_vsi(vsi);
+
+	/* Setup default MSIX irq handler for VSI */
+	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+	pf->vsi[vsi_idx] = vsi;
+	ret = vsi_idx;
+err_alloc_vsi:
+	mutex_unlock(&pf->switch_mutex);
+	return ret;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf;
+
+	if (!vsi)
+		return 0;
+
+	if (!vsi->back)
+		goto free_vsi;
+	pf = vsi->back;
+
+	mutex_lock(&pf->switch_mutex);
+	if (!pf->vsi[vsi->idx]) {
+		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
+			vsi->idx, vsi->idx, vsi, vsi->type);
+		goto unlock_vsi;
+	}
+
+	if (pf->vsi[vsi->idx] != vsi) {
+		dev_err(&pf->pdev->dev,
+			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
+			pf->vsi[vsi->idx]->idx,
+			pf->vsi[vsi->idx],
+			pf->vsi[vsi->idx]->type,
+			vsi->idx, vsi, vsi->type);
+		goto unlock_vsi;
+	}
+
+	/* updates the pf for this cleared vsi */
+	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+	pf->vsi[vsi->idx] = NULL;
+	if (vsi->idx < pf->next_vsi)
+		pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+	mutex_unlock(&pf->switch_mutex);
+free_vsi:
+	kfree(vsi);
+
+	return 0;
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	int ret = 0;
+	int i;
+
+	vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
+				sizeof(struct i40e_ring), GFP_KERNEL);
+	if (!vsi->rx_rings) {
+		ret = -ENOMEM;
+		goto err_alloc_rings;
+	}
+
+	vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
+				sizeof(struct i40e_ring), GFP_KERNEL);
+	if (!vsi->tx_rings) {
+		ret = -ENOMEM;
+		kfree(vsi->rx_rings);
+		goto err_alloc_rings;
+	}
+
+	/* Set basic values in the rings to be used later during open() */
+	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+		struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+		struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+
+		tx_ring->queue_index = i;
+		tx_ring->reg_idx = vsi->base_queue + i;
+		tx_ring->ring_active = false;
+		tx_ring->vsi = vsi;
+		tx_ring->netdev = vsi->netdev;
+		tx_ring->dev = &pf->pdev->dev;
+		tx_ring->count = vsi->num_desc;
+		tx_ring->size = 0;
+		tx_ring->dcb_tc = 0;
+
+		rx_ring->queue_index = i;
+		rx_ring->reg_idx = vsi->base_queue + i;
+		rx_ring->ring_active = false;
+		rx_ring->vsi = vsi;
+		rx_ring->netdev = vsi->netdev;
+		rx_ring->dev = &pf->pdev->dev;
+		rx_ring->count = vsi->num_desc;
+		rx_ring->size = 0;
+		rx_ring->dcb_tc = 0;
+		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
+			set_ring_16byte_desc_enabled(rx_ring);
+		else
+			clear_ring_16byte_desc_enabled(rx_ring);
+	}
+
+err_alloc_rings:
+	return ret;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+	if (vsi) {
+		kfree(vsi->rx_rings);
+		kfree(vsi->tx_rings);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @vectors: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+{
+	int err = 0;
+
+	pf->num_msix_entries = 0;
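+	/* note: pci_enable_msix() returns 0 on success, a negative errno
+	 * on hard failure, or a positive count of vectors it could have
+	 * provided; e.g. (illustrative) asking for 17 when only 10 are
+	 * free returns 10, and we retry the request with that count
+	 */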
+	while (vectors >= I40E_MIN_MSIX) {
+		err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
+		if (err == 0) {
+			/* good to go */
+			pf->num_msix_entries = vectors;
+			break;
+		} else if (err < 0) {
+			/* total failure */
+			dev_info(&pf->pdev->dev,
+				 "MSI-X vector reservation failed: %d\n", err);
+			vectors = 0;
+			break;
+		} else {
+			/* err > 0 is the hint for retry */
+			dev_info(&pf->pdev->dev,
+				 "MSI-X vectors wanted %d, retrying with %d\n",
+				 vectors, err);
+			vectors = err;
+		}
+	}
+
+	if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+		dev_info(&pf->pdev->dev,
+			 "Couldn't get enough vectors, only %d available\n",
+			 vectors);
+		vectors = 0;
+	}
+
+	return vectors;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+	i40e_status err = 0;
+	struct i40e_hw *hw = &pf->hw;
+	int v_budget, i;
+	int vec;
+
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		return -ENODEV;
+
+	/* The number of vectors we'll request will be composed of:
+	 *   - 1 for the "other" cause: Admin Queue events, etc.
+	 *   - The number of LAN queue pairs
+	 *        already adjusted for the NUMA node
+	 *        assumes symmetric Tx/Rx pairing
+	 *   - The number of VMDq pairs
+	 * Once we count this up, try the request.
+	 *
+	 * If we can't get what we want, we'll simplify to nearly nothing
+	 * and try again.  If that still fails, we punt.
+	 */
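+	/* illustrative example (hypothetical counts): 8 LAN queue pairs,
+	 * 4 VMDq VSIs of 2 queue pairs each, and FDIR enabled gives
+	 * v_budget = 1 + 8 + (4 * 2) + 1 = 18
+	 */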
+	pf->num_lan_msix = pf->num_lan_qps;
+	pf->num_vmdq_msix = pf->num_vmdq_qps;
+	v_budget = 1 + pf->num_lan_msix;
+	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+	if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+		v_budget++;
+
+	/* Scale down if necessary, and the rings will share vectors */
+	v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+
+	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+				   GFP_KERNEL);
+	if (!pf->msix_entries)
+		return -ENOMEM;
+
+	for (i = 0; i < v_budget; i++)
+		pf->msix_entries[i].entry = i;
+	vec = i40e_reserve_msix_vectors(pf, v_budget);
+	if (vec < I40E_MIN_MSIX) {
+		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+		kfree(pf->msix_entries);
+		pf->msix_entries = NULL;
+		return -ENODEV;
+
+	} else if (vec == I40E_MIN_MSIX) {
+		/* Adjust for minimal MSIX use */
+		dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+		pf->num_vmdq_vsis = 0;
+		pf->num_vmdq_qps = 0;
+		pf->num_vmdq_msix = 0;
+		pf->num_lan_qps = 1;
+		pf->num_lan_msix = 1;
+
+	} else if (vec != v_budget) {
+		/* Scale vector usage down */
+		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
+		vec--;                    /* reserve the misc vector */
+
+		/* partition out the remaining vectors */
+		switch (vec) {
+		case 2:
+			pf->num_vmdq_vsis = 1;
+			pf->num_lan_msix = 1;
+			break;
+		case 3:
+			pf->num_vmdq_vsis = 1;
+			pf->num_lan_msix = 2;
+			break;
+		default:
+			pf->num_lan_msix = min_t(int, (vec / 2),
+						 pf->num_lan_qps);
+			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
+						  I40E_DEFAULT_NUM_VMDQ_VSI);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	int v_idx, num_q_vectors;
+
+	/* if not MSIX, give the one vector only to the LAN VSI */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		num_q_vectors = vsi->num_q_vectors;
+	else if (vsi == pf->vsi[pf->lan_vsi])
+		num_q_vectors = 1;
+	else
+		return -EINVAL;
+
+	vsi->q_vectors = kcalloc(num_q_vectors,
+				 sizeof(struct i40e_q_vector),
+				 GFP_KERNEL);
+	if (!vsi->q_vectors)
+		return -ENOMEM;
+
+	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+		vsi->q_vectors[v_idx].vsi = vsi;
+		vsi->q_vectors[v_idx].v_idx = v_idx;
+		cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
+		if (vsi->netdev)
+			netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
+				       i40e_napi_poll, vsi->work_limit);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ **/
+static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+{
+	int err = 0;
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		err = i40e_init_msix(pf);
+		if (err) {
+			pf->flags &= ~(I40E_FLAG_RSS_ENABLED	   |
+					I40E_FLAG_MQ_ENABLED	   |
+					I40E_FLAG_DCB_ENABLED	   |
+					I40E_FLAG_SRIOV_ENABLED	   |
+					I40E_FLAG_FDIR_ENABLED	   |
+					I40E_FLAG_FDIR_ATR_ENABLED |
+					I40E_FLAG_VMDQ_ENABLED);
+
+			/* rework the queue expectations without MSIX */
+			i40e_determine_queue_usage(pf);
+		}
+	}
+
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+		err = pci_enable_msi(pf->pdev);
+		if (err) {
+			dev_info(&pf->pdev->dev,
+				 "MSI init failed (%d), trying legacy.\n", err);
+			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+		}
+	}
+
+	/* track first vector for misc interrupts */
+	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+}
+
+/**
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * when in MSI or Legacy interrupt mode.
+ **/
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	int err = 0;
+
+	/* Only request the irq if this is the first time through, and
+	 * not when we're rebuilding after a Reset
+	 */
+	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+		err = request_irq(pf->msix_entries[0].vector,
+				  i40e_intr, 0, pf->misc_int_name, pf);
+		if (err) {
+			dev_info(&pf->pdev->dev,
+				 "request_irq for msix_misc failed: %d\n", err);
+			return -EFAULT;
+		}
+	}
+
+	i40e_enable_misc_int_causes(hw);
+
+	/* associate no queues to the misc vector */
+	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+
+	i40e_flush(hw);
+
+	i40e_irq_dynamic_enable_icr0(pf);
+
+	return err;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 lut = 0;
+	int i, j;
+	u64 hena;
+	/* Set of random keys generated using kernel random number generator */
+	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
+				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
+				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
+				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+
+	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
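+	/* (hena is a single 64-bit enable bitmap split across the two
+	 * 32-bit HENA registers: HENA(0) holds bits 31:0 and HENA(1)
+	 * holds bits 63:32)
+	 */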
+	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+	hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
+		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+	/* Populate the LUT with max no. of queues in round robin fashion */
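+	/* illustrative example: with rss_size = 4, iterations i = 0..3
+	 * shift queue entries 0,1,2,3 byte-by-byte into one 32-bit word
+	 * that is written to HLUT register 0 when i == 3; i = 4..7 then
+	 * pack 0,1,2,3 again into HLUT register 1, and so on
+	 */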
+	for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+
+		/* The assumption is that lan qp count will be the highest
+		 * qp count for any PF VSI that needs RSS.
+		 * If multiple VSIs need RSS support, all the qp counts
+		 * for those VSIs should be a power of 2 for RSS to work.
+		 * If LAN VSI is the only consumer for RSS then this requirement
+		 * is not necessary.
+		 */
+		if (j == pf->rss_size)
+			j = 0;
+		/* lut = 4-byte sliding window of 4 lut entries */
+		lut = (lut << 8) | (j &
+			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+		/* On i = 3, we have 4 entries in lut; write to the register */
+		if ((i & 3) == 3)
+			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
+	}
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
+ *
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int i40e_sw_init(struct i40e_pf *pf)
+{
+	int err = 0;
+	int size;
+
+	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
+				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
+		if (I40E_DEBUG_USER & debug)
+			pf->hw.debug_mask = debug;
+		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
+						I40E_DEFAULT_MSG_ENABLE);
+	}
+
+	/* Set default capability flags */
+	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+		    I40E_FLAG_MSI_ENABLED     |
+		    I40E_FLAG_MSIX_ENABLED    |
+		    I40E_FLAG_RX_PS_ENABLED   |
+		    I40E_FLAG_MQ_ENABLED      |
+		    I40E_FLAG_RX_1BUF_ENABLED;
+
+	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+	if (pf->hw.func_caps.rss) {
+		pf->flags |= I40E_FLAG_RSS_ENABLED;
+		pf->rss_size = min_t(int, pf->rss_size_max,
+				     nr_cpus_node(numa_node_id()));
+	} else {
+		pf->rss_size = 1;
+	}
+
+	if (pf->hw.func_caps.dcb)
+		pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
+	else
+		pf->num_tc_qps = 0;
+
+	if (pf->hw.func_caps.fd) {
+		/* FW/NVM is not yet fixed in this regard */
+		if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+		    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+			pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
+			dev_info(&pf->pdev->dev,
+				 "Flow Director ATR mode Enabled\n");
+			pf->flags |= I40E_FLAG_FDIR_ENABLED;
+			dev_info(&pf->pdev->dev,
+				 "Flow Director Side Band mode Enabled\n");
+			pf->fdir_pf_filter_count =
+					 pf->hw.func_caps.fd_filters_guaranteed;
+		}
+	} else {
+		pf->fdir_pf_filter_count = 0;
+	}
+
+	if (pf->hw.func_caps.vmdq) {
+		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+	}
+
+	/* MFP mode enabled */
+	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+		pf->flags |= I40E_FLAG_MFP_ENABLED;
+		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+	}
+
+#ifdef CONFIG_PCI_IOV
+	if (pf->hw.func_caps.num_vfs) {
+		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+		pf->num_req_vfs = min_t(int,
+					pf->hw.func_caps.num_vfs,
+					I40E_MAX_VF_COUNT);
+	}
+#endif /* CONFIG_PCI_IOV */
+	pf->eeprom_version = 0xDEAD;
+	pf->lan_veb = I40E_NO_VEB;
+	pf->lan_vsi = I40E_NO_VSI;
+
+	/* set up queue assignment tracking */
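+	/* (the lump tracker is presumably a small header followed by one
+	 * u16 slot per queue, from which i40e_get_lump()/i40e_put_lump()
+	 * hand out and reclaim contiguous runs)
+	 */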
+	size = sizeof(struct i40e_lump_tracking)
+		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+	pf->qp_pile = kzalloc(size, GFP_KERNEL);
+	if (!pf->qp_pile) {
+		err = -ENOMEM;
+		goto sw_init_done;
+	}
+	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+	pf->qp_pile->search_hint = 0;
+
+	/* set up vector assignment tracking */
+	size = sizeof(struct i40e_lump_tracking)
+		+ (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
+	pf->irq_pile = kzalloc(size, GFP_KERNEL);
+	if (!pf->irq_pile) {
+		kfree(pf->qp_pile);
+		err = -ENOMEM;
+		goto sw_init_done;
+	}
+	pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
+	pf->irq_pile->search_hint = 0;
+
+	mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+	return err;
+}
+
+/**
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ **/
+static int i40e_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		i40e_vlan_stripping_enable(vsi);
+	else
+		i40e_vlan_stripping_disable(vsi);
+
+	return 0;
+}
+
+static const struct net_device_ops i40e_netdev_ops = {
+	.ndo_open		= i40e_open,
+	.ndo_stop		= i40e_close,
+	.ndo_start_xmit		= i40e_lan_xmit_frame,
+	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
+	.ndo_set_rx_mode	= i40e_set_rx_mode,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= i40e_set_mac,
+	.ndo_change_mtu		= i40e_change_mtu,
+	.ndo_tx_timeout		= i40e_tx_timeout,
+	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= i40e_netpoll,
+#endif
+	.ndo_setup_tc		= i40e_setup_tc,
+	.ndo_set_features	= i40e_set_features,
+	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
+	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
+	.ndo_set_vf_tx_rate	= i40e_ndo_set_vf_bw,
+	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
+};
+
+/**
+ * i40e_config_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_config_netdev(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_netdev_priv *np;
+	struct net_device *netdev;
+	u8 mac_addr[ETH_ALEN];
+	int etherdev_size;
+
+	etherdev_size = sizeof(struct i40e_netdev_priv);
+	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
+	if (!netdev)
+		return -ENOMEM;
+
+	vsi->netdev = netdev;
+	np = netdev_priv(netdev);
+	np->vsi = vsi;
+
+	netdev->hw_enc_features = NETIF_F_IP_CSUM	 |
+				  NETIF_F_GSO_UDP_TUNNEL |
+				  NETIF_F_TSO		 |
+				  NETIF_F_SG;
+
+	netdev->features = NETIF_F_SG		       |
+			   NETIF_F_IP_CSUM	       |
+			   NETIF_F_SCTP_CSUM	       |
+			   NETIF_F_HIGHDMA	       |
+			   NETIF_F_GSO_UDP_TUNNEL      |
+			   NETIF_F_HW_VLAN_CTAG_TX     |
+			   NETIF_F_HW_VLAN_CTAG_RX     |
+			   NETIF_F_HW_VLAN_CTAG_FILTER |
+			   NETIF_F_IPV6_CSUM	       |
+			   NETIF_F_TSO		       |
+			   NETIF_F_TSO6		       |
+			   NETIF_F_RXCSUM	       |
+			   NETIF_F_RXHASH	       |
+			   0;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features;
+
+	if (vsi->type == I40E_VSI_MAIN) {
+		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+		memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+	} else {
+		/* relate the VSI_VMDQ name to the VSI_MAIN name */
+		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
+			 pf->vsi[pf->lan_vsi]->netdev->name);
+		random_ether_addr(mac_addr);
+		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+	}
+
+	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+	/* vlan gets same features (except vlan offload)
+	 * after any tweaks for specific VSI types
+	 */
+	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
+						     NETIF_F_HW_VLAN_CTAG_RX |
+						   NETIF_F_HW_VLAN_CTAG_FILTER);
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+	/* Setup netdev TC information */
+	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
+
+	netdev->netdev_ops = &i40e_netdev_ops;
+	netdev->watchdog_timeo = 5 * HZ;
+	i40e_set_ethtool_ops(netdev);
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_delete - Delete a VSI from the switch
+ * @vsi: the VSI being removed
+ **/
+static void i40e_vsi_delete(struct i40e_vsi *vsi)
+{
+	/* removing the default VSI is not allowed */
+	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
+		return;
+
+	/* there is no HW VSI for FDIR */
+	if (vsi->type == I40E_VSI_FDIR)
+		return;
+
+	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
+}
+
+/**
+ * i40e_add_vsi - Add a VSI to the switch
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command.
+ **/
+static int i40e_add_vsi(struct i40e_vsi *vsi)
+{
+	int ret = -ENODEV;
+	struct i40e_mac_filter *f, *ftmp;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vsi_context ctxt;
+	u8 enabled_tc = 0x1; /* TC0 enabled */
+	int f_count = 0;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	switch (vsi->type) {
+	case I40E_VSI_MAIN:
+		/* The PF's main VSI is already setup as part of the
+		 * device initialization, so we'll not bother with
+		 * the add_vsi call, but we will retrieve the current
+		 * VSI context.
+		 */
+		ctxt.seid = pf->main_vsi_seid;
+		ctxt.pf_num = pf->hw.pf_id;
+		ctxt.vf_num = 0;
+		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "couldn't get pf vsi config, err %d, aq_err %d\n",
+				 ret, pf->hw.aq.asq_last_status);
+			return -ENOENT;
+		}
+		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+		vsi->info.valid_sections = 0;
+
+		vsi->seid = ctxt.seid;
+		vsi->id = ctxt.vsi_number;
+
+		enabled_tc = i40e_pf_get_tc_map(pf);
+
+		/* MFP mode setup queue map and update VSI */
+		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+			memset(&ctxt, 0, sizeof(ctxt));
+			ctxt.seid = pf->main_vsi_seid;
+			ctxt.pf_num = pf->hw.pf_id;
+			ctxt.vf_num = 0;
+			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "update vsi failed, aq_err=%d\n",
+					 pf->hw.aq.asq_last_status);
+				ret = -ENOENT;
+				goto err;
+			}
+			/* update the local VSI info queue map */
+			i40e_vsi_update_queue_map(vsi, &ctxt);
+			vsi->info.valid_sections = 0;
+		} else {
+			/* Default/Main VSI is only enabled for TC0
+			 * reconfigure it to enable all TCs that are
+			 * available on the port in SFP mode.
+			 */
+			ret = i40e_vsi_config_tc(vsi, enabled_tc);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
+					 enabled_tc, ret,
+					 pf->hw.aq.asq_last_status);
+				ret = -ENOENT;
+			}
+		}
+		break;
+
+	case I40E_VSI_FDIR:
+		/* no queue mapping or actual HW VSI needed */
+		vsi->info.valid_sections = 0;
+		vsi->seid = 0;
+		vsi->id = 0;
+		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+		return 0;
+
+	case I40E_VSI_VMDQ2:
+		ctxt.pf_num = hw->pf_id;
+		ctxt.vf_num = 0;
+		ctxt.uplink_seid = vsi->uplink_seid;
+		ctxt.connection_type = 0x1;     /* regular data port */
+		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+		/* This VSI is connected to VEB so the switch_id
+		 * should be set to zero by default.
+		 */
+		ctxt.info.switch_id = 0;
+		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+		/* Setup the VSI tx/rx queue map for TC0 only for now */
+		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+		break;
+
+	case I40E_VSI_SRIOV:
+		ctxt.pf_num = hw->pf_id;
+		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+		ctxt.uplink_seid = vsi->uplink_seid;
+		ctxt.connection_type = 0x1;     /* regular data port */
+		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+		/* This VSI is connected to VEB so the switch_id
+		 * should be set to zero by default.
+		 */
+		ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+		/* Setup the VSI tx/rx queue map for TC0 only for now */
+		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+		break;
+
+	default:
+		return -ENODEV;
+	}
+
+	if (vsi->type != I40E_VSI_MAIN) {
+		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "add vsi failed, aq_err=%d\n",
+				 vsi->back->hw.aq.asq_last_status);
+			ret = -ENOENT;
+			goto err;
+		}
+		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+		vsi->info.valid_sections = 0;
+		vsi->seid = ctxt.seid;
+		vsi->id = ctxt.vsi_number;
+	}
+
+	/* If macvlan filters already exist, force them to get loaded */
+	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+		f->changed = true;
+		f_count++;
+	}
+	if (f_count) {
+		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+		pf->flags |= I40E_FLAG_FILTER_SYNC;
+	}
+
+	/* Update VSI BW information */
+	ret = i40e_vsi_get_bw_info(vsi);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "couldn't get vsi bw info, err %d, aq_err %d\n",
+			 ret, pf->hw.aq.asq_last_status);
+		/* VSI is already added so not tearing that up */
+		ret = 0;
+	}
+
+err:
+	return ret;
+}
+
+/**
+ * i40e_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_vsi_release(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f, *ftmp;
+	struct i40e_veb *veb = NULL;
+	struct i40e_pf *pf;
+	u16 uplink_seid;
+	int i, n;
+
+	pf = vsi->back;
+
+	/* release of a VEB-owner or last VSI is not allowed */
+	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
+		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
+			 vsi->seid, vsi->uplink_seid);
+		return -ENODEV;
+	}
+	if (vsi == pf->vsi[pf->lan_vsi] &&
+	    !test_bit(__I40E_DOWN, &pf->state)) {
+		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
+		return -ENODEV;
+	}
+
+	uplink_seid = vsi->uplink_seid;
+	if (vsi->type != I40E_VSI_SRIOV) {
+		if (vsi->netdev_registered) {
+			vsi->netdev_registered = false;
+			if (vsi->netdev) {
+				/* results in a call to i40e_close() */
+				unregister_netdev(vsi->netdev);
+				free_netdev(vsi->netdev);
+				vsi->netdev = NULL;
+			}
+		} else {
+			if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+				i40e_down(vsi);
+			i40e_vsi_free_irq(vsi);
+			i40e_vsi_free_tx_resources(vsi);
+			i40e_vsi_free_rx_resources(vsi);
+		}
+		i40e_vsi_disable_irq(vsi);
+	}
+
+	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
+		i40e_del_filter(vsi, f->macaddr, f->vlan,
+				f->is_vf, f->is_netdev);
+	i40e_sync_vsi_filters(vsi);
+
+	i40e_vsi_delete(vsi);
+	i40e_vsi_free_q_vectors(vsi);
+	i40e_vsi_clear_rings(vsi);
+	i40e_vsi_clear(vsi);
+
+	/* If this was the last thing on the VEB, except for the
+	 * controlling VSI, remove the VEB, which puts the controlling
+	 * VSI onto the next level down in the switch.
+	 *
+	 * Well, okay, there's one more exception here: don't remove
+	 * the orphan VEBs yet.  We'll wait for an explicit remove request
+	 * from up the network stack.
+	 */
+	for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		if (pf->vsi[i] &&
+		    pf->vsi[i]->uplink_seid == uplink_seid &&
+		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+			n++;      /* count the VSIs */
+		}
+	}
+	for (i = 0; i < I40E_MAX_VEB; i++) {
+		if (!pf->veb[i])
+			continue;
+		if (pf->veb[i]->uplink_seid == uplink_seid)
+			n++;     /* count the VEBs */
+		if (pf->veb[i]->seid == uplink_seid)
+			veb = pf->veb[i];
+	}
+	if (n == 0 && veb && veb->uplink_seid != 0)
+		i40e_veb_release(veb);
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after i40e_vsi_mem_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ **/
+static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
+{
+	int ret = -ENOENT;
+	struct i40e_pf *pf = vsi->back;
+
+	if (vsi->q_vectors) {
+		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+			 vsi->seid);
+		return -EEXIST;
+	}
+
+	if (vsi->base_vector) {
+		dev_info(&pf->pdev->dev,
+			 "VSI %d has non-zero base vector %d\n",
+			 vsi->seid, vsi->base_vector);
+		return -EEXIST;
+	}
+
+	ret = i40e_alloc_q_vectors(vsi);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
+			 vsi->num_q_vectors, vsi->seid, ret);
+		vsi->num_q_vectors = 0;
+		goto vector_setup_out;
+	}
+
+	vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+					 vsi->num_q_vectors, vsi->idx);
+	if (vsi->base_vector < 0) {
+		dev_info(&pf->pdev->dev,
+			 "failed to get q tracking for VSI %d, err=%d\n",
+			 vsi->seid, vsi->base_vector);
+		i40e_vsi_free_q_vectors(vsi);
+		ret = -ENOENT;
+		goto vector_setup_out;
+	}
+
+vector_setup_out:
+	return ret;
+}
+
+/**
+ * i40e_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @uplink_seid: the switch element to link to
+ * @param1: usage depends upon VSI type. For VF types, indicates VF id
+ *
+ * This allocates the sw VSI structure and its queue resources, then adds
+ * the VSI to the identified VEB.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw
+ * struct on success, otherwise returns NULL on failure.
+ **/
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+				u16 uplink_seid, u32 param1)
+{
+	struct i40e_vsi *vsi = NULL;
+	struct i40e_veb *veb = NULL;
+	int ret, i;
+	int v_idx;
+
+	/* The requested uplink_seid must be either
+	 *     - the PF's port seid
+	 *              no VEB is needed because this is the PF
+	 *              or this is a Flow Director special case VSI
+	 *     - seid of an existing VEB
+	 *     - seid of a VSI that owns an existing VEB
+	 *     - seid of a VSI that doesn't own a VEB
+	 *              a new VEB is created and the VSI becomes the owner
+	 *     - seid of the PF VSI, which is what creates the first VEB
+	 *              this is a special case of the previous
+	 *
+	 * Find which uplink_seid we were given and create a new VEB if needed
+	 */
+	for (i = 0; i < I40E_MAX_VEB; i++) {
+		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
+			veb = pf->veb[i];
+			break;
+		}
+	}
+
+	if (!veb && uplink_seid != pf->mac_seid) {
+
+		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
+				vsi = pf->vsi[i];
+				break;
+			}
+		}
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
+				 uplink_seid);
+			return NULL;
+		}
+
+		if (vsi->uplink_seid == pf->mac_seid)
+			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+					     vsi->tc_config.enabled_tc);
+		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+					     vsi->tc_config.enabled_tc);
+
+		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+				veb = pf->veb[i];
+		}
+		if (!veb) {
+			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
+			return NULL;
+		}
+
+		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+		uplink_seid = veb->seid;
+	}
+
+	/* get vsi sw struct */
+	v_idx = i40e_vsi_mem_alloc(pf, type);
+	if (v_idx < 0)
+		goto err_alloc;
+	vsi = pf->vsi[v_idx];
+	vsi->type = type;
+	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
+
+	if (type == I40E_VSI_MAIN)
+		pf->lan_vsi = v_idx;
+	else if (type == I40E_VSI_SRIOV)
+		vsi->vf_id = param1;
+	/* assign it some queues */
+	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+	if (ret < 0) {
+		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+			 vsi->seid, ret);
+		goto err_vsi;
+	}
+	vsi->base_queue = ret;
+
+	/* get a VSI from the hardware */
+	vsi->uplink_seid = uplink_seid;
+	ret = i40e_add_vsi(vsi);
+	if (ret)
+		goto err_vsi;
+
+	switch (vsi->type) {
+	/* setup the netdev if needed */
+	case I40E_VSI_MAIN:
+	case I40E_VSI_VMDQ2:
+		ret = i40e_config_netdev(vsi);
+		if (ret)
+			goto err_netdev;
+		ret = register_netdev(vsi->netdev);
+		if (ret)
+			goto err_netdev;
+		vsi->netdev_registered = true;
+		netif_carrier_off(vsi->netdev);
+		/* fall through */
+
+	case I40E_VSI_FDIR:
+		/* set up vectors and rings if needed */
+		ret = i40e_vsi_setup_vectors(vsi);
+		if (ret)
+			goto err_msix;
+
+		ret = i40e_alloc_rings(vsi);
+		if (ret)
+			goto err_rings;
+
+		/* map all of the rings to the q_vectors */
+		i40e_vsi_map_rings_to_vectors(vsi);
+
+		i40e_vsi_reset_stats(vsi);
+		break;
+
+	default:
+		/* no netdev or rings for the other VSI types */
+		break;
+	}
+
+	return vsi;
+
+err_rings:
+	i40e_vsi_free_q_vectors(vsi);
+err_msix:
+	if (vsi->netdev_registered) {
+		vsi->netdev_registered = false;
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+err_netdev:
+	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+	i40e_vsi_clear(vsi);
+err_alloc:
+	return NULL;
+}
+
+/**
+ * i40e_veb_get_bw_info - Query VEB BW information
+ * @veb: the veb to query
+ *
+ * Query the Tx scheduler BW configuration data for given VEB
+ **/
+static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+{
+	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
+	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
+	struct i40e_pf *pf = veb->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 tc_bw_max;
+	int ret = 0;
+	int i;
+
+	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+						  &bw_data, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "query veb bw config failed, aq_err=%d\n",
+			 hw->aq.asq_last_status);
+		goto out;
+	}
+
+	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+						   &ets_data, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "query veb bw ets config failed, aq_err=%d\n",
+			 hw->aq.asq_last_status);
+		goto out;
+	}
+
+	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
+	veb->bw_max_quanta = ets_data.tc_bw_max;
+	veb->is_abs_credits = bw_data.absolute_credits_enable;
+	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
+		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
+		veb->bw_tc_limit_credits[i] =
+					le16_to_cpu(bw_data.tc_bw_limits[i]);
+		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
+ * @pf: board private structure
+ *
+ * On error: returns error code (negative)
+ * On success: returns veb index in PF (positive)
+ **/
+static int i40e_veb_mem_alloc(struct i40e_pf *pf)
+{
+	int ret = -ENOENT;
+	struct i40e_veb *veb;
+	int i;
+
+	/* Need to protect the allocation of switch elements at the PF level */
+	mutex_lock(&pf->switch_mutex);
+
+	/* VEB list may be fragmented if VEB creation/destruction has
+	 * been happening.  We can afford to do a quick scan to look
+	 * for any free slots in the list.
+	 *
+	 * find next empty veb slot, looping back around if necessary
+	 */
+	i = 0;
+	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
+		i++;
+	if (i >= I40E_MAX_VEB) {
+		ret = -ENOMEM;
+		goto err_alloc_veb;  /* out of VEB slots! */
+	}
+
+	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
+	if (!veb) {
+		ret = -ENOMEM;
+		goto err_alloc_veb;
+	}
+	veb->pf = pf;
+	veb->idx = i;
+	veb->enabled_tc = 1;
+
+	pf->veb[i] = veb;
+	ret = i;
+err_alloc_veb:
+	mutex_unlock(&pf->switch_mutex);
+	return ret;
+}
+
+/**
+ * i40e_switch_branch_release - Delete a branch of the switch tree
+ * @branch: where to start deleting
+ *
+ * This uses recursion to find the tips of the branch to be removed,
+ * deleting from the tips back toward the trunk until we can finally
+ * delete this VEB itself.
+ **/
+static void i40e_switch_branch_release(struct i40e_veb *branch)
+{
+	struct i40e_pf *pf = branch->pf;
+	u16 branch_seid = branch->seid;
+	u16 veb_idx = branch->idx;
+	int i;
+
+	/* release any VEBs on this VEB - RECURSION */
+	for (i = 0; i < I40E_MAX_VEB; i++) {
+		if (!pf->veb[i])
+			continue;
+		if (pf->veb[i]->uplink_seid == branch->seid)
+			i40e_switch_branch_release(pf->veb[i]);
+	}
+
+	/* Release the VSIs on this VEB, but not the owner VSI.
+	 *
+	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
+	 *       the VEB itself, so don't use (*branch) after this loop.
+	 */
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		if (!pf->vsi[i])
+			continue;
+		if (pf->vsi[i]->uplink_seid == branch_seid &&
+		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+			i40e_vsi_release(pf->vsi[i]);
+		}
+	}
+
+	/* There's one corner case where the VEB might not have been
+	 * removed, so double check it here and remove it if needed.
+	 * This case happens if the veb was created from the debugfs
+	 * commands and no VSIs were added to it.
+	 */
+	if (pf->veb[veb_idx])
+		i40e_veb_release(pf->veb[veb_idx]);
+}
+
+/**
+ * i40e_veb_clear - remove veb struct
+ * @veb: the veb to remove
+ **/
+static void i40e_veb_clear(struct i40e_veb *veb)
+{
+	if (!veb)
+		return;
+
+	if (veb->pf) {
+		struct i40e_pf *pf = veb->pf;
+
+		mutex_lock(&pf->switch_mutex);
+		if (pf->veb[veb->idx] == veb)
+			pf->veb[veb->idx] = NULL;
+		mutex_unlock(&pf->switch_mutex);
+	}
+
+	kfree(veb);
+}
+
+/**
+ * i40e_veb_release - Delete a VEB and free its resources
+ * @veb: the VEB being removed
+ **/
+void i40e_veb_release(struct i40e_veb *veb)
+{
+	struct i40e_vsi *vsi = NULL;
+	struct i40e_pf *pf;
+	int i, n = 0;
+
+	pf = veb->pf;
+
+	/* find the remaining VSI and check for extras */
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+			n++;
+			vsi = pf->vsi[i];
+		}
+	}
+	if (n != 1) {
+		dev_info(&pf->pdev->dev,
+			 "can't remove VEB %d with %d VSIs left\n",
+			 veb->seid, n);
+		return;
+	}
+
+	/* move the remaining VSI to uplink veb */
+	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+	if (veb->uplink_seid) {
+		vsi->uplink_seid = veb->uplink_seid;
+		if (veb->uplink_seid == pf->mac_seid)
+			vsi->veb_idx = I40E_NO_VEB;
+		else
+			vsi->veb_idx = veb->veb_idx;
+	} else {
+		/* floating VEB */
+		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+	}
+
+	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+	i40e_veb_clear(veb);
+
+}
+
+/**
+ * i40e_add_veb - create the VEB in the switch
+ * @veb: the VEB to be instantiated
+ * @vsi: the controlling VSI
+ **/
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+{
+	bool is_default = (vsi->idx == vsi->back->lan_vsi);
+	int ret;
+
+	/* get a VEB from the hardware */
+	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+			      veb->enabled_tc, is_default, &veb->seid, NULL);
+	if (ret) {
+		dev_info(&veb->pf->pdev->dev,
+			 "couldn't add VEB, err %d, aq_err %d\n",
+			 ret, veb->pf->hw.aq.asq_last_status);
+		return -EPERM;
+	}
+
+	/* get statistics counter */
+	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+					 &veb->stats_idx, NULL, NULL, NULL);
+	if (ret) {
+		dev_info(&veb->pf->pdev->dev,
+			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
+			 ret, veb->pf->hw.aq.asq_last_status);
+		return -EPERM;
+	}
+	ret = i40e_veb_get_bw_info(veb);
+	if (ret) {
+		dev_info(&veb->pf->pdev->dev,
+			 "couldn't get VEB bw info, err %d, aq_err %d\n",
+			 ret, veb->pf->hw.aq.asq_last_status);
+		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+		return -ENOENT;
+	}
+
+	vsi->uplink_seid = veb->seid;
+	vsi->veb_idx = veb->idx;
+	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+
+	return 0;
+}
+
+/**
+ * i40e_veb_setup - Set up a VEB
+ * @pf: board private structure
+ * @flags: VEB setup flags
+ * @uplink_seid: the switch element to link to
+ * @vsi_seid: the initial VSI seid
+ * @enabled_tc: Enabled TC bit-map
+ *
+ * This allocates the sw VEB structure and links it into the switch.
+ * It is possible and legal for this to be a duplicate of an already
+ * existing VEB.  It is also possible for both uplink and vsi seids
+ * to be zero, in order to create a floating VEB.
+ *
+ * Returns pointer to the successfully allocated VEB sw struct on
+ * success, otherwise returns NULL on failure.
+ **/
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
+				u16 uplink_seid, u16 vsi_seid,
+				u8 enabled_tc)
+{
+	struct i40e_veb *veb, *uplink_veb = NULL;
+	int vsi_idx, veb_idx;
+	int ret;
+
+	/* if one seid is 0, the other must be 0 to create a floating relay */
+	if ((uplink_seid == 0 || vsi_seid == 0) &&
+	    (uplink_seid + vsi_seid != 0)) {
+		dev_info(&pf->pdev->dev,
+			 "one, not both, seids are 0: uplink=%d vsi=%d\n",
+			 uplink_seid, vsi_seid);
+		return NULL;
+	}
+
+	/* make sure there is such a vsi and uplink */
+	for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
+			break;
+	if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
+			 vsi_seid);
+		return NULL;
+	}
+
+	if (uplink_seid && uplink_seid != pf->mac_seid) {
+		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+			if (pf->veb[veb_idx] &&
+			    pf->veb[veb_idx]->seid == uplink_seid) {
+				uplink_veb = pf->veb[veb_idx];
+				break;
+			}
+		}
+		if (!uplink_veb) {
+			dev_info(&pf->pdev->dev,
+				 "uplink seid %d not found\n", uplink_seid);
+			return NULL;
+		}
+	}
+
+	/* get veb sw struct */
+	veb_idx = i40e_veb_mem_alloc(pf);
+	if (veb_idx < 0)
+		goto err_alloc;
+	veb = pf->veb[veb_idx];
+	veb->flags = flags;
+	veb->uplink_seid = uplink_seid;
+	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
+	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+
+	/* create the VEB in the switch */
+	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+	if (ret)
+		goto err_veb;
+
+	return veb;
+
+err_veb:
+	i40e_veb_clear(veb);
+err_alloc:
+	return NULL;
+}
+
+/**
+ * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * @pf: board private structure
+ * @ele: element we are building info from
+ * @num_reported: total number of elements
+ * @printconfig: should we print the contents
+ *
+ * helper function to assist in extracting a few useful SEID values.
+ **/
+static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
+				struct i40e_aqc_switch_config_element_resp *ele,
+				u16 num_reported, bool printconfig)
+{
+	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
+	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
+	u8 element_type = ele->element_type;
+	u16 seid = le16_to_cpu(ele->seid);
+
+	if (printconfig)
+		dev_info(&pf->pdev->dev,
+			 "type=%d seid=%d uplink=%d downlink=%d\n",
+			 element_type, seid, uplink_seid, downlink_seid);
+
+	switch (element_type) {
+	case I40E_SWITCH_ELEMENT_TYPE_MAC:
+		pf->mac_seid = seid;
+		break;
+	case I40E_SWITCH_ELEMENT_TYPE_VEB:
+		/* Main VEB? */
+		if (uplink_seid != pf->mac_seid)
+			break;
+		if (pf->lan_veb == I40E_NO_VEB) {
+			int v;
+
+			/* find existing or else empty VEB */
+			for (v = 0; v < I40E_MAX_VEB; v++) {
+				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
+					pf->lan_veb = v;
+					break;
+				}
+			}
+			if (pf->lan_veb == I40E_NO_VEB) {
+				v = i40e_veb_mem_alloc(pf);
+				if (v < 0)
+					break;
+				pf->lan_veb = v;
+			}
+		}
+
+		pf->veb[pf->lan_veb]->seid = seid;
+		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
+		pf->veb[pf->lan_veb]->pf = pf;
+		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+		break;
+	case I40E_SWITCH_ELEMENT_TYPE_VSI:
+		if (num_reported != 1)
+			break;
+		/* This is immediately after a reset so we can assume this is
+		 * the PF's VSI
+		 */
+		pf->mac_seid = uplink_seid;
+		pf->pf_seid = downlink_seid;
+		pf->main_vsi_seid = seid;
+		if (printconfig)
+			dev_info(&pf->pdev->dev,
+				 "pf_seid=%d main_vsi_seid=%d\n",
+				 pf->pf_seid, pf->main_vsi_seid);
+		break;
+	case I40E_SWITCH_ELEMENT_TYPE_PF:
+	case I40E_SWITCH_ELEMENT_TYPE_VF:
+	case I40E_SWITCH_ELEMENT_TYPE_EMP:
+	case I40E_SWITCH_ELEMENT_TYPE_BMC:
+	case I40E_SWITCH_ELEMENT_TYPE_PE:
+	case I40E_SWITCH_ELEMENT_TYPE_PA:
+		/* ignore these for now */
+		break;
+	default:
+		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
+			 element_type, seid);
+		break;
+	}
+}
+
+/**
+ * i40e_fetch_switch_configuration - Get switch config from firmware
+ * @pf: board private structure
+ * @printconfig: should we print the contents
+ *
+ * Get the current switch configuration from the device and
+ * extract a few useful SEID values.
+ **/
+int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+{
+	struct i40e_aqc_get_switch_config_resp *sw_config;
+	u16 next_seid = 0;
+	int ret = 0;
+	u8 *aq_buf;
+	int i;
+
+	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+	if (!aq_buf)
+		return -ENOMEM;
+
+	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
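+	/* the switch config may span several AQ responses; next_seid is
+	 * the paging cursor, and comes back as 0 after the last page
+	 */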
+	do {
+		u16 num_reported, num_total;
+
+		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
+						I40E_AQ_LARGE_BUF,
+						&next_seid, NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "get switch config failed %d aq_err=%x\n",
+				 ret, pf->hw.aq.asq_last_status);
+			kfree(aq_buf);
+			return -ENOENT;
+		}
+
+		num_reported = le16_to_cpu(sw_config->header.num_reported);
+		num_total = le16_to_cpu(sw_config->header.num_total);
+
+		if (printconfig)
+			dev_info(&pf->pdev->dev,
+				 "header: %d reported %d total\n",
+				 num_reported, num_total);
+
+		if (num_reported) {
+			int sz = sizeof(*sw_config) * num_reported;
+
+			kfree(pf->sw_config);
+			pf->sw_config = kzalloc(sz, GFP_KERNEL);
+			if (pf->sw_config)
+				memcpy(pf->sw_config, sw_config, sz);
+		}
+
+		for (i = 0; i < num_reported; i++) {
+			struct i40e_aqc_switch_config_element_resp *ele =
+				&sw_config->element[i];
+
+			i40e_setup_pf_switch_element(pf, ele, num_reported,
+						     printconfig);
+		}
+	} while (next_seid != 0);
+
+	kfree(aq_buf);
+	return ret;
+}
+
+/**
+ * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_setup_pf_switch(struct i40e_pf *pf)
+{
+	int ret;
+
+	/* find out what's out there already */
+	ret = i40e_fetch_switch_configuration(pf, false);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "couldn't fetch switch config, err %d, aq_err %d\n",
+			 ret, pf->hw.aq.asq_last_status);
+		return ret;
+	}
+	i40e_pf_reset_stats(pf);
+
+	/* fdir VSI must happen first to be sure it gets queue 0, but only
+	 * if there is enough room for the fdir VSI
+	 */
+	if (pf->num_lan_qps > 1)
+		i40e_fdir_setup(pf);
+
+	/* first time setup */
+	if (pf->lan_vsi == I40E_NO_VSI) {
+		struct i40e_vsi *vsi = NULL;
+		u16 uplink_seid;
+
+		/* Set up the PF VSI associated with the PF's main VSI
+		 * that is already in the HW switch
+		 */
+		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+			uplink_seid = pf->veb[pf->lan_veb]->seid;
+		else
+			uplink_seid = pf->mac_seid;
+
+		vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+			i40e_fdir_teardown(pf);
+			return -EAGAIN;
+		}
+		/* accommodate kcompat by copying the main VSI queue count
+		 * into the pf, since this newer code pushes the pf queue
+		 * info down a level into a VSI
+		 */
+		pf->num_rx_queues = vsi->alloc_queue_pairs;
+		pf->num_tx_queues = vsi->alloc_queue_pairs;
+	} else {
+		/* force a reset of TC and queue layout configurations */
+		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+	}
+	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+
+	/* Setup static PF queue filter control settings */
+	ret = i40e_setup_pf_filter_control(pf);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
+			 ret);
+		/* A failure here is not fatal; continue with the other steps */
+	}
+
+	/* enable RSS in the HW, even for only one queue, as the stack can use
+	 * the hash
+	 */
+	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+		i40e_config_rss(pf);
+
+	/* fill in link information and enable LSE reporting */
+	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+	i40e_link_event(pf);
+
+	/* Initialize user-specific link properties */
+	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+				  I40E_AQ_AN_COMPLETED) ? true : false);
+	pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
+	if (pf->hw.phy.link_info.an_info &
+	   (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+		pf->hw.fc.current_mode = I40E_FC_FULL;
+	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+		pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
+	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+		pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
+	else
+		pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+
+	return ret;
+}
+
+/**
+ * i40e_set_rss_size - helper to set rss_size
+ * @pf: board private structure
+ * @queues_left: how many queues
+ */
+static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
+{
+	int num_tc0;
+
+	num_tc0 = min_t(int, queues_left, pf->rss_size_max);
+	num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
+	num_tc0 = rounddown_pow_of_two(num_tc0);
+
+	return num_tc0;
+}
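+
+/* Worked example (hypothetical values): with queues_left = 30, rss_size_max =
+ * 64 and 6 CPUs on the local NUMA node, this computes min(30, 64) = 30, then
+ * min(30, 6) = 6, and finally rounddown_pow_of_two(6) = 4, so four queue
+ * pairs are used for RSS.
+ */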
+
+/**
+ * i40e_determine_queue_usage - Work out queue distribution
+ * @pf: board private structure
+ **/
+static void i40e_determine_queue_usage(struct i40e_pf *pf)
+{
+	int accum_tc_size;
+	int queues_left;
+
+	pf->num_lan_qps = 0;
+	pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
+	accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
+
+	/* Find the max queues to be put into basic use.  We'll always be
+	 * using TC0, whether or not DCB is running, and TC0 will get the
+	 * big RSS set.
+	 */
+	queues_left = pf->hw.func_caps.num_tx_qp;
+
+	if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+	      (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
+	    !(pf->flags & (I40E_FLAG_RSS_ENABLED |
+			   I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
+	    (queues_left == 1)) {
+
+		/* one qp for PF, no queues for anything else */
+		queues_left = 0;
+		pf->rss_size = pf->num_lan_qps = 1;
+
+		/* make sure all the fancies are disabled */
+		pf->flags &= ~(I40E_FLAG_RSS_ENABLED       |
+				I40E_FLAG_MQ_ENABLED	   |
+				I40E_FLAG_FDIR_ENABLED	   |
+				I40E_FLAG_FDIR_ATR_ENABLED |
+				I40E_FLAG_DCB_ENABLED	   |
+				I40E_FLAG_SRIOV_ENABLED	   |
+				I40E_FLAG_VMDQ_ENABLED);
+
+	} else if (pf->flags & I40E_FLAG_RSS_ENABLED	  &&
+		   !(pf->flags & I40E_FLAG_FDIR_ENABLED)  &&
+		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+		pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+		queues_left -= pf->rss_size;
+		pf->num_lan_qps = pf->rss_size;
+
+	} else if (pf->flags & I40E_FLAG_RSS_ENABLED	  &&
+		   !(pf->flags & I40E_FLAG_FDIR_ENABLED)  &&
+		   (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+		/* save num_tc_qps queues for TCs 1 thru 7 and the rest
+		 * are set up for RSS in TC0
+		 */
+		queues_left -= accum_tc_size;
+
+		pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+		queues_left -= pf->rss_size;
+		if (queues_left < 0) {
+			dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
+			return;
+		}
+
+		pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+	} else if (pf->flags & I40E_FLAG_RSS_ENABLED   &&
+		  (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+		  !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+		queues_left -= 1; /* save 1 queue for FD */
+
+		pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+		queues_left -= pf->rss_size;
+		if (queues_left < 0) {
+			dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
+			return;
+		}
+
+		pf->num_lan_qps = pf->rss_size;
+
+	} else if (pf->flags & I40E_FLAG_RSS_ENABLED   &&
+		  (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+		  (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+		/* save 1 queue for TCs 1 thru 7,
+		 * 1 queue for flow director,
+		 * and the rest are set up for RSS in TC0
+		 */
+		queues_left -= 1;
+		queues_left -= accum_tc_size;
+
+		pf->rss_size = i40e_set_rss_size(pf, queues_left);
+		queues_left -= pf->rss_size;
+		if (queues_left < 0) {
+			dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
+			return;
+		}
+
+		pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+	} else {
+		dev_info(&pf->pdev->dev,
+			 "Invalid configuration, flags=0x%08llx\n", pf->flags);
+		return;
+	}
+
+	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
+		pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
+							       pf->num_vf_qps));
+		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
+	}
+
+	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
+		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
+					  (queues_left / pf->num_vmdq_qps));
+		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
+	}
+}
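+
+/* Worked example (hypothetical values, assuming I40E_MAX_TRAFFIC_CLASS is 8):
+ * with func_caps.num_tx_qp = 64, num_tc_qps = 4 and RSS + DCB enabled (no
+ * FDIR), accum_tc_size = (8 - 1) * 4 = 28, leaving 36 queues; on an 8-CPU
+ * node rss_size becomes 8, so num_lan_qps = 8 + 28 = 36 and the remaining
+ * 28 queues stay available for SR-IOV VFs and VMDq.
+ */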
+
+/**
+ * i40e_setup_pf_filter_control - Setup PF static filter control
+ * @pf: PF to be setup
+ *
+ * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * settings. If PE/FCoE are enabled then it will also set the per-PF
+ * filter sizes required for them. It also enables the Flow Director,
+ * ethertype and macvlan type filter settings for the pf.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
+{
+	struct i40e_filter_control_settings *settings = &pf->filter_settings;
+
+	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
+
+	/* Flow Director is enabled */
+	if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+		settings->enable_fdir = true;
+
+	/* Ethtype and MACVLAN filters enabled for PF */
+	settings->enable_ethtype = true;
+	settings->enable_macvlan = true;
+
+	if (i40e_set_filter_control(&pf->hw, settings))
+		return -ENOENT;
+
+	return 0;
+}
+
+/**
+ * i40e_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40e_pci_tbl
+ *
+ * i40e_probe initializes a pf identified by a pci_dev structure.
+ * The OS initialization, configuration of the pf private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct i40e_driver_version dv;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	int err = 0;
+	u32 len;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	/* set up for high or low dma */
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		/* coherent mask for the same size will always succeed if
+		 * dma_set_mask does
+		 */
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	} else {
+		err = -EIO;
+		dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+		goto err_dma;
+	}
+
+	/* set up pci connections */
+	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+					   IORESOURCE_MEM), i40e_driver_name);
+	if (err) {
+		dev_info(&pdev->dev,
+			 "pci_request_selected_regions failed %d\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+
+	/* Now that we have a PCI connection, we need to do the
+	 * low level device setup.  This is primarily setting up
+	 * the Admin Queue structures and then querying for the
+	 * device's current profile information.
+	 */
+	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+	if (!pf) {
+		err = -ENOMEM;
+		goto err_pf_alloc;
+	}
+	pf->next_vsi = 0;
+	pf->pdev = pdev;
+	set_bit(__I40E_DOWN, &pf->state);
+
+	hw = &pf->hw;
+	hw->back = pf;
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+			      pci_resource_len(pdev, 0));
+	if (!hw->hw_addr) {
+		err = -EIO;
+		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
+			 (unsigned int)pci_resource_start(pdev, 0),
+			 (unsigned int)pci_resource_len(pdev, 0), err);
+		goto err_ioremap;
+	}
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	hw->bus.device = PCI_SLOT(pdev->devfn);
+	hw->bus.func = PCI_FUNC(pdev->devfn);
+
+	/* Reset here to make sure all is clean and to define PF 'n' */
+	err = i40e_pf_reset(hw);
+	if (err) {
+		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+		goto err_pf_reset;
+	}
+	pf->pfr_count++;
+
+	hw->aq.num_arq_entries = I40E_AQ_LEN;
+	hw->aq.num_asq_entries = I40E_AQ_LEN;
+	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+	snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
+		 "%s-pf%d:misc",
+		 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
+
+	err = i40e_init_shared_code(hw);
+	if (err) {
+		dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+		goto err_pf_reset;
+	}
+
+	err = i40e_init_adminq(hw);
+	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+	if (err) {
+		dev_info(&pdev->dev,
+			 "init_adminq failed: %d expecting API %02x.%02x\n",
+			 err,
+			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+		goto err_pf_reset;
+	}
+
+	err = i40e_get_capabilities(pf);
+	if (err)
+		goto err_adminq_setup;
+
+	err = i40e_sw_init(pf);
+	if (err) {
+		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
+		goto err_sw_init;
+	}
+
+	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+				hw->func_caps.num_rx_qp,
+				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+	if (err) {
+		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
+		goto err_init_lan_hmc;
+	}
+
+	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+	if (err) {
+		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
+		err = -ENOENT;
+		goto err_configure_lan_hmc;
+	}
+
+	i40e_get_mac_addr(hw, hw->mac.addr);
+	if (i40e_validate_mac_addr(hw->mac.addr)) {
+		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
+		err = -EIO;
+		goto err_mac_addr;
+	}
+	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
+	memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+
+	pci_set_drvdata(pdev, pf);
+	pci_save_state(pdev);
+
+	/* set up periodic task facility */
+	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+	pf->service_timer_period = HZ;
+
+	INIT_WORK(&pf->service_task, i40e_service_task);
+	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+	pf->link_check_timeout = jiffies;
+
+	/* set up the main switch operations */
+	i40e_determine_queue_usage(pf);
+	i40e_init_interrupt_scheme(pf);
+
+	/* Set up the *vsi struct based on the number of VSIs in the HW,
+	 * and set up our local tracking of the MAIN PF vsi.
+	 */
+	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+	pf->vsi = kzalloc(len, GFP_KERNEL);
+	if (!pf->vsi) {
+		err = -ENOMEM;
+		goto err_switch_setup;
+	}
+
+	err = i40e_setup_pf_switch(pf);
+	if (err) {
+		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+		goto err_vsis;
+	}
+
+	/* The main driver is (mostly) up and happy. We need to set this state
+	 * before setting up the misc vector or we get a race and the vector
+	 * ends up disabled forever.
+	 */
+	clear_bit(__I40E_DOWN, &pf->state);
+
+	/* In case of MSIX we are going to setup the misc vector right here
+	 * to handle admin queue events etc. In case of legacy and MSI
+	 * the misc functionality and queue processing is combined in
+	 * the same vector and that gets setup at open.
+	 */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		err = i40e_setup_misc_vector(pf);
+		if (err) {
+			dev_info(&pdev->dev,
+				 "setup of misc vector failed: %d\n", err);
+			goto err_vsis;
+		}
+	}
+
+	/* prep for VF support */
+	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+	    (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+		u32 val;
+
+		/* disable link interrupts for VFs */
+		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
+		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+		i40e_flush(hw);
+	}
+
+	i40e_dbg_pf_init(pf);
+
+	/* tell the firmware that we're starting */
+	dv.major_version = DRV_VERSION_MAJOR;
+	dv.minor_version = DRV_VERSION_MINOR;
+	dv.build_version = DRV_VERSION_BUILD;
+	dv.subbuild_version = 0;
+	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+	/* since everything's happy, start the service_task timer */
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));
+
+	return 0;
+
+	/* Unwind what we've done if something failed in the setup */
+err_vsis:
+	set_bit(__I40E_DOWN, &pf->state);
+err_switch_setup:
+	i40e_clear_interrupt_scheme(pf);
+	kfree(pf->vsi);
+	del_timer_sync(&pf->service_timer);
+err_mac_addr:
+err_configure_lan_hmc:
+	(void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+	kfree(pf->qp_pile);
+	kfree(pf->irq_pile);
+err_sw_init:
+err_adminq_setup:
+	(void)i40e_shutdown_adminq(hw);
+err_pf_reset:
+	iounmap(hw->hw_addr);
+err_ioremap:
+	kfree(pf);
+err_pf_alloc:
+	pci_disable_pcie_error_reporting(pdev);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * i40e_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * i40e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40e_remove(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	i40e_status ret_code;
+	u32 reg;
+	int i;
+
+	i40e_dbg_pf_exit(pf);
+
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+		i40e_free_vfs(pf);
+		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+	}
+
+	/* no more scheduling of any task */
+	set_bit(__I40E_DOWN, &pf->state);
+	del_timer_sync(&pf->service_timer);
+	cancel_work_sync(&pf->service_task);
+
+	i40e_fdir_teardown(pf);
+
+	/* If there is a switch structure or any orphans, remove them.
+	 * This will leave only the PF's VSI remaining.
+	 */
+	for (i = 0; i < I40E_MAX_VEB; i++) {
+		if (!pf->veb[i])
+			continue;
+
+		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
+		    pf->veb[i]->uplink_seid == 0)
+			i40e_switch_branch_release(pf->veb[i]);
+	}
+
+	/* Now we can shutdown the PF's VSI, just before we kill
+	 * adminq and hmc.
+	 */
+	if (pf->vsi[pf->lan_vsi])
+		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+
+	i40e_stop_misc_vector(pf);
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		synchronize_irq(pf->msix_entries[0].vector);
+		free_irq(pf->msix_entries[0].vector, pf);
+	}
+
+	/* shutdown and destroy the HMC */
+	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+	if (ret_code)
+		dev_warn(&pdev->dev,
+			 "Failed to destroy the HMC resources: %d\n", ret_code);
+
+	/* shutdown the adminq */
+	i40e_aq_queue_shutdown(&pf->hw, true);
+	ret_code = i40e_shutdown_adminq(&pf->hw);
+	if (ret_code)
+		dev_warn(&pdev->dev,
+			 "Failed to destroy the Admin Queue resources: %d\n",
+			 ret_code);
+
+	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+	i40e_clear_interrupt_scheme(pf);
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		if (pf->vsi[i]) {
+			i40e_vsi_clear_rings(pf->vsi[i]);
+			i40e_vsi_clear(pf->vsi[i]);
+			pf->vsi[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < I40E_MAX_VEB; i++) {
+		kfree(pf->veb[i]);
+		pf->veb[i] = NULL;
+	}
+
+	kfree(pf->qp_pile);
+	kfree(pf->irq_pile);
+	kfree(pf->sw_config);
+	kfree(pf->vsi);
+
+	/* force a PF reset to clean anything leftover */
+	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
+	wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+	i40e_flush(&pf->hw);
+
+	iounmap(pf->hw.hw_addr);
+	kfree(pf);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * i40e_pci_error_detected - warning that something funky happened in PCI land
+ * @pdev: PCI device information struct
+ *
+ * Called to warn that something happened and the error handling steps
+ * are in progress.  It allows the driver to quiesce things and be
+ * ready for remediation.
+ **/
+static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+						enum pci_channel_state error)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+
+	/* shutdown all operations */
+	i40e_pf_quiesce_all_vsi(pf);
+
+	/* Request a slot reset */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * i40e_pci_error_slot_reset - a PCI slot reset just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to determine whether the driver can work with the device now
+ * that the pci slot has been reset.  If a basic connection seems good
+ * (registers are readable and have sane content) then return a
+ * happy little PCI_ERS_RESULT_xxx.
+ **/
+static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	pci_ers_result_t result;
+	int err;
+	u32 reg;
+
+	dev_info(&pdev->dev, "%s\n", __func__);
+	if (pci_enable_device_mem(pdev)) {
+		dev_info(&pdev->dev,
+			 "Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+		pci_wake_from_d3(pdev, false);
+
+		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+		if (reg == 0)
+			result = PCI_ERS_RESULT_RECOVERED;
+		else
+			result = PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		dev_info(&pdev->dev,
+			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+			 err);
+		/* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ * i40e_pci_error_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error
+ * and/or reset recovery has finished.
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "%s\n", __func__);
+	i40e_handle_reset_warning(pf);
+}
+
+static const struct pci_error_handlers i40e_err_handler = {
+	.error_detected = i40e_pci_error_detected,
+	.slot_reset = i40e_pci_error_slot_reset,
+	.resume = i40e_pci_error_resume,
+};
+
+static struct pci_driver i40e_driver = {
+	.name     = i40e_driver_name,
+	.id_table = i40e_pci_tbl,
+	.probe    = i40e_probe,
+	.remove   = i40e_remove,
+	.err_handler = &i40e_err_handler,
+	.sriov_configure = i40e_pci_sriov_configure,
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40e_init_module(void)
+{
+	pr_info("%s: %s - version %s\n", i40e_driver_name,
+		i40e_driver_string, i40e_driver_version_str);
+	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+	i40e_dbg_init();
+	return pci_register_driver(&i40e_driver);
+}
+module_init(i40e_init_module);
+
+/**
+ * i40e_exit_module - Driver exit cleanup routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40e_exit_module(void)
+{
+	pci_unregister_driver(&i40e_driver);
+	i40e_dbg_exit();
+}
+module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
new file mode 100644
index 0000000..97e1bb3
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -0,0 +1,391 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_prototype.h"
+
+/**
+ *  i40e_init_nvm - Initialize NVM function pointers.
+ *  @hw: pointer to the HW structure.
+ *
+ *  Sets up the function pointers and the NVM info structure. Should be called
+ *  once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ *  Note that the term NVM is used here (and in all functions covered in this
+ *  file) as an equivalent of the FLASH part mapped into the SR; the FLASH is
+ *  always accessed through the Shadow RAM.
+ **/
+i40e_status i40e_init_nvm(struct i40e_hw *hw)
+{
+	struct i40e_nvm_info *nvm = &hw->nvm;
+	i40e_status ret_code = 0;
+	u32 fla, gens;
+	u8 sr_size;
+
+	/* The SR size is stored regardless of the nvm programming mode
+	 * as the blank mode may be used on the factory line.
+	 */
+	gens = rd32(hw, I40E_GLNVM_GENS);
+	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+	/* Convert to words (sr_size holds the log2 of the SR size in KB). */
+	nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
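+	/* For example (illustrative): sr_size = 4 means a (1 << 4) = 16KB
+	 * Shadow RAM, i.e. 16 * I40E_SR_WORDS_IN_1KB 16-bit words.
+	 */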
+
+	/* Check if we are in the normal or blank NVM programming mode. */
+	fla = rd32(hw, I40E_GLNVM_FLA);
+	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
+		/* Max NVM timeout. */
+		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+		nvm->blank_nvm_mode = false;
+	} else { /* Blank programming mode. */
+		nvm->blank_nvm_mode = true;
+		ret_code = I40E_ERR_NVM_BLANK_MODE;
+		hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
+	}
+
+	return ret_code;
+}
+
+/**
+ *  i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
+ *  @hw: pointer to the HW structure.
+ *  @access: NVM access type (read or write).
+ *
+ *  This function requests NVM ownership for the given access type (read
+ *  or write) via the proper Admin Command.
+ **/
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+				       enum i40e_aq_resource_access_type access)
+{
+	i40e_status ret_code = 0;
+	u64 gtime, timeout;
+	u64 time = 0;
+
+	if (hw->nvm.blank_nvm_mode)
+		goto i40e_acquire_nvm_exit;
+
+	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+					    0, &time, NULL);
+	/* Reading the Global Device Timer. */
+	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+	/* Store the timeout. */
+	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+
+	if (ret_code) {
+		/* Set the polling timeout. */
+		if (time > I40E_MAX_NVM_TIMEOUT)
+			timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+				  + gtime;
+		else
+			timeout = hw->nvm.hw_semaphore_timeout;
+		/* Poll until the current NVM owner times out. */
+		while (gtime < timeout) {
+			usleep_range(10000, 20000);
+			ret_code = i40e_aq_request_resource(hw,
+							I40E_NVM_RESOURCE_ID,
+							access, 0, &time,
+							NULL);
+			if (!ret_code) {
+				hw->nvm.hw_semaphore_timeout =
+						I40E_MS_TO_GTIME(time) + gtime;
+				break;
+			}
+			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+		}
+		if (ret_code) {
+			hw->nvm.hw_semaphore_timeout = 0;
+			hw->nvm.hw_semaphore_wait =
+						I40E_MS_TO_GTIME(time) + gtime;
+			hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
+				  time);
+		}
+	}
+
+i40e_acquire_nvm_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_release_nvm - Generic request for releasing the NVM ownership.
+ *  @hw: pointer to the HW structure.
+ *
+ *  This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+	if (!hw->nvm.blank_nvm_mode)
+		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+
+/**
+ *  i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
+ *  @hw: pointer to the HW structure.
+ *
+ *  Polls the SRCTL Shadow RAM register done bit.
+ **/
+static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+	i40e_status ret_code = I40E_ERR_TIMEOUT;
+	u32 srctl, wait_cnt;
+
+	/* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+		srctl = rd32(hw, I40E_GLNVM_SRCTL);
+		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+			ret_code = 0;
+			break;
+		}
+		udelay(5);
+	}
+	if (ret_code == I40E_ERR_TIMEOUT)
+		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
+	return ret_code;
+}
+
+/**
+ *  i40e_read_nvm_srctl - Reads Shadow RAM.
+ *  @hw: pointer to the HW structure.
+ *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ *  @data: word read from the Shadow RAM.
+ *
+ *  Reads a 16-bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
+						 u16 *data)
+{
+	i40e_status ret_code = I40E_ERR_TIMEOUT;
+	u32 sr_reg;
+
+	if (offset >= hw->nvm.sr_size) {
+		hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
+		ret_code = I40E_ERR_PARAM;
+		goto read_nvm_exit;
+	}
+
+	/* Poll the done bit first. */
+	ret_code = i40e_poll_sr_srctl_done_bit(hw);
+	if (!ret_code) {
+		/* Write the address and start reading. */
+		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+		/* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+		ret_code = i40e_poll_sr_srctl_done_bit(hw);
+		if (!ret_code) {
+			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+			*data = (u16)((sr_reg &
+				       I40E_GLNVM_SRDATA_RDDATA_MASK)
+				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+		}
+	}
+	if (ret_code)
+		hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+			  offset);
+
+read_nvm_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_read_nvm_word - Reads Shadow RAM word.
+ *  @hw: pointer to the HW structure.
+ *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ *  @data: word read from the Shadow RAM.
+ *
+ *  Reads a 16-bit word from the Shadow RAM. Each read is preceded by
+ *  taking the NVM ownership and followed by its release.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
+{
+	i40e_status ret_code = 0;
+
+	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (!ret_code) {
+		ret_code = i40e_read_nvm_srctl(hw, offset, data);
+		i40e_release_nvm(hw);
+	}
+
+	return ret_code;
+}
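+
+/* Usage sketch (illustrative; use_checksum() is a hypothetical consumer):
+ *
+ *	u16 csum;
+ *
+ *	if (!i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &csum))
+ *		use_checksum(csum);
+ */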
+
+/**
+ *  i40e_read_nvm_buffer - Reads Shadow RAM buffer.
+ *  @hw: pointer to the HW structure.
+ *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ *  @words: number of words to read (in) &
+ *          number of words read before the NVM ownership timeout (out).
+ *  @data: words read from the Shadow RAM.
+ *
+ *  Reads a buffer of 16-bit words from the SR using i40e_read_nvm_srctl().
+ *  The buffer read is preceded by taking the NVM ownership and followed
+ *  by its release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data)
+{
+	i40e_status ret_code = 0;
+	u16 index, word;
+	u32 time;
+
+	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (!ret_code) {
+		/* Loop thru the selected region. */
+		for (word = 0; word < *words; word++) {
+			index = offset + word;
+			ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
+			if (ret_code)
+				break;
+			/* Check that we have not exceeded the semaphore timeout. */
+			time = rd32(hw, I40E_GLVFGEN_TIMER);
+			if (time >= hw->nvm.hw_semaphore_timeout) {
+				ret_code = I40E_ERR_TIMEOUT;
+				hw_dbg(hw, "NVM read error: timeout.\n");
+				break;
+			}
+		}
+		/* Update the number of words read from the Shadow RAM. */
+		*words = word;
+		/* Release the NVM ownership. */
+		i40e_release_nvm(hw);
+	}
+
+	return ret_code;
+}
+
+/**
+ *  i40e_calc_nvm_checksum - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum: pointer to the checksum to be returned
+ *
+ *  This function calculates the SW checksum that covers the whole 64kB shadow
+ *  RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ *  of the VPD area are customer specific and unknown, so the maximum possible
+ *  VPD size (1kB) is skipped.
+ **/
+static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+						    u16 *checksum)
+{
+	i40e_status ret_code = 0;
+	u16 pcie_alt_module = 0;
+	u16 checksum_local = 0;
+	u16 vpd_module = 0;
+	u16 word = 0;
+	u32 i = 0;
+
+	/* read pointer to VPD area */
+	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+	if (ret_code) {
+		ret_code = I40E_ERR_NVM_CHECKSUM;
+		goto i40e_calc_nvm_checksum_exit;
+	}
+
+	/* read pointer to PCIe Alt Auto-load module */
+	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+				       &pcie_alt_module);
+	if (ret_code) {
+		ret_code = I40E_ERR_NVM_CHECKSUM;
+		goto i40e_calc_nvm_checksum_exit;
+	}
+
+	/* Calculate SW checksum that covers the whole 64kB shadow RAM
+	 * except the VPD and PCIe ALT Auto-load modules
+	 */
+	for (i = 0; i < hw->nvm.sr_size; i++) {
+		/* Skip Checksum word */
+		if (i == I40E_SR_SW_CHECKSUM_WORD)
+			i++;
+		/* Skip VPD module (convert byte size to word count) */
+		if (i == (u32)vpd_module) {
+			i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
+			if (i >= hw->nvm.sr_size)
+				break;
+		}
+		/* Skip PCIe ALT module (convert byte size to word count) */
+		if (i == (u32)pcie_alt_module) {
+			i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
+			if (i >= hw->nvm.sr_size)
+				break;
+		}
+
+		ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+		if (ret_code) {
+			ret_code = I40E_ERR_NVM_CHECKSUM;
+			goto i40e_calc_nvm_checksum_exit;
+		}
+		checksum_local += word;
+	}
+
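+	/* The stored checksum word is defined so that it plus the sum of all
+	 * non-skipped words equals I40E_SR_SW_CHECKSUM_BASE (mod 2^16); the
+	 * subtraction below reproduces that stored value.
+	 */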
+	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_validate_nvm_checksum - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum: calculated checksum
+ *
+ *  Performs the checksum calculation and validates the NVM SW checksum.
+ *  If the caller does not need the checksum, @checksum can be NULL.
+ **/
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+						 u16 *checksum)
+{
+	i40e_status ret_code = 0;
+	u16 checksum_sr = 0;
+	u16 checksum_local;
+
+	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (ret_code)
+		goto i40e_validate_nvm_checksum_exit;
+
+	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+	if (ret_code)
+		goto i40e_validate_nvm_checksum_free;
+
+	/* Do not use i40e_read_nvm_word() because we do not want to take
+	 * the synchronization semaphores twice here.
+	 */
+	i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+	/* Verify read checksum from EEPROM is the same as
+	 * calculated checksum
+	 */
+	if (checksum_local != checksum_sr)
+		ret_code = I40E_ERR_NVM_CHECKSUM;
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum)
+		*checksum = checksum_local;
+
+i40e_validate_nvm_checksum_free:
+	i40e_release_nvm(hw);
+
+i40e_validate_nvm_checksum_exit:
+	return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
new file mode 100644
index 0000000..702c81b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+#include <linux/highuid.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file provides the glue between the shared code and the
+ * actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...)	do {} while (0)
+
+#define wr32(a, reg, value)	writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg)		readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value)	writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg)		readq((a)->hw_addr + (reg))
+#define i40e_flush(a)		readl((a)->hw_addr + I40E_GLGEN_STAT)
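+
+/* Example (illustrative): wr32(hw, I40E_GLNVM_SRCTL, val) writes val to the
+ * MMIO register at hw->hw_addr + I40E_GLNVM_SRCTL, and
+ * rd32(hw, I40E_GLNVM_SRCTL) reads it back.
+ */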
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+	void *va;
+	dma_addr_t pa;
+	u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+			i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+	void *va;
+	u32 size;
+} __packed;
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...)                                \
+do {                                                            \
+	if (((m) & (h)->debug_mask))                            \
+		pr_info("i40e %02x.%x " s,                      \
+			(h)->bus.device, (h)->bus.func,         \
+			##__VA_ARGS__);                         \
+} while (0)
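+
+/* Example (illustrative, assuming I40E_DEBUG_NVM is a bit in hw->debug_mask):
+ *
+ *	i40e_debug(hw, I40E_DEBUG_NVM, "read word 0x%x\n", offset);
+ *
+ * expands to a pr_info() tagged with the device's bus and function numbers.
+ */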
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
new file mode 100644
index 0000000..f75bb9c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -0,0 +1,239 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+i40e_status i40e_init_adminq(struct i40e_hw *hw);
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *events_pending);
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw,
+		   enum i40e_debug_mask mask,
+		   void *desc,
+		   void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode);
+
+/* admin send queue commands */
+
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+				u16 *fw_major_version, u16 *fw_minor_version,
+				u16 *api_major_version, u16 *api_minor_version,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading);
+i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+				bool enable_lse, struct i40e_link_status *link,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+				u64 advt_reg,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+				struct i40e_driver_version *dv,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+				u16 vsi_id, bool set_filter,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+				u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+				u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+				u16 downlink_seid, u8 enabled_tc,
+				bool default_port, u16 *pveb_seid,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+				u16 veb_seid, u16 *switch_id, bool *floating,
+				u16 *statistic_index, u16 *vebs_used,
+				u16 *vebs_free,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_add_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+				struct i40e_aqc_get_switch_config_resp *buf,
+				u16 buf_size, u16 *start_seid,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				enum i40e_aq_resource_access_type access,
+				u8 sdp_number, u64 *timeout,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				u8 sdp_number,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+				void *buff, u16 buff_size, u16 *data_size,
+				enum i40e_admin_queue_opc list_type_opc,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+				u8 mib_type, void *buff, u16 buff_size,
+				u16 *local_len, u16 *remote_len,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+				bool enable_update,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+				    u16 flags, u8 *mac_addr,
+				    struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+				enum i40e_aq_hmc_profile profile,
+				u8 pe_vf_enabled_count,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+				u16 seid, u16 credit, u8 max_bw,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_port_ets_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details);
+/* i40e_common */
+i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
+						u8 *mac_addr);
+i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+					struct i40e_lldp_variables *lldp_cfg);
+/* prototype for functions used for NVM access */
+i40e_status i40e_init_nvm(struct i40e_hw *hw);
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+				      enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
+					 u16 *data);
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+					 u16 *data);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data);
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+						 u16 *checksum);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers*/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				enum i40e_virtchnl_ops v_opcode,
+				i40e_status v_retval,
+				u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
new file mode 100644
index 0000000..6bd333c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -0,0 +1,4688 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
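+/* Example (illustrative): I40E_VF_ARQBAH(3) = 0x00081400 + 3 * 4 =
+ * 0x0008140C; the per-VF registers are laid out at a 4-byte stride.
+ */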
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
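+
+/*
+ * Illustrative sketch only: parameterized macros such as
+ * I40E_VSIGEN_RTRIG(_VSI) address fixed-stride register arrays, and the
+ * index must stay within the matching *_MAX_INDEX. The wr32() MMIO write
+ * accessor is assumed from the driver and is not defined in this header.
+ */
+static inline void i40e_example_vsi_reset(struct i40e_hw *hw, int vsi_id)
+{
+	if (vsi_id <= I40E_VSIGEN_RTRIG_MAX_INDEX)
+		wr32(hw, I40E_VSIGEN_RTRIG(vsi_id),
+		     I40E_VSIGEN_RTRIG_VMSWR_MASK);
+}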
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000CD000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000CD100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
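+/*
+ * Minimal usage sketch (illustrative only, assuming the driver's
+ * rd32()/wr32() MMIO accessors): a shadow-RAM word is read by programming
+ * the word offset into GLNVM_SRCTL, setting START, polling DONE (a real
+ * caller would bound the poll with a timeout), then extracting the result
+ * from GLNVM_SRDATA with the usual mask-then-shift idiom:
+ *
+ *	wr32(hw, I40E_GLNVM_SRCTL,
+ *	     (offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ *	     (1 << I40E_GLNVM_SRCTL_START_SHIFT));
+ *	while (!(rd32(hw, I40E_GLNVM_SRCTL) & I40E_GLNVM_SRCTL_DONE_MASK))
+ *		cpu_relax();
+ *	*data = (rd32(hw, I40E_GLNVM_SRDATA) & I40E_GLNVM_SRDATA_RDDATA_MASK)
+ *		>> I40E_GLNVM_SRDATA_RDDATA_SHIFT;
+ */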
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
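+/*
+ * Usage sketch (illustrative only; rd32() assumed as the driver's MMIO
+ * read helper): PF_FUNC_RID carries the function's own PCI requester ID,
+ * and each component falls out by masking then shifting:
+ *
+ *	u32 rid  = rd32(hw, I40E_PF_FUNC_RID);
+ *	u8  bus  = (rid & I40E_PF_FUNC_RID_BUS_NUMBER_MASK) >>
+ *		   I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT;
+ *	u8  dev  = (rid & I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK) >>
+ *		   I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT;
+ *	u8  func = (rid & I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK) >>
+ *		   I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT;
+ */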
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
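+/*
+ * Usage sketch (illustrative only; wr32() assumed, qpid and desc_idx are
+ * caller-supplied values): multi-field registers such as PFPE_WQEALLOC
+ * are composed by shifting each field into place, masking it, and OR-ing
+ * the results into a single write:
+ *
+ *	wr32(hw, I40E_PFPE_WQEALLOC,
+ *	     ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
+ *	      I40E_PFPE_WQEALLOC_PEQPID_MASK) |
+ *	     ((desc_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
+ *	      I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK));
+ */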
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
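+
+/*
+ * Usage sketch (illustrative only): the 64-bit PES statistics above are
+ * split into a 32-bit LO register and a narrower (16- or 24-bit) HI
+ * register. Assuming struct i40e_hw and the driver's rd32() MMIO read
+ * accessor are visible here, a hypothetical helper can combine the halves:
+ */
+static inline u64 i40e_read_pes_stat64(struct i40e_hw *hw, u32 lo_reg,
+				       u32 hi_reg, u32 hi_mask)
+{
+	u64 lo = rd32(hw, lo_reg);		/* lower 32 bits */
+	u64 hi = rd32(hw, hi_reg) & hi_mask;	/* upper bits, pre-masked */
+
+	return (hi << 32) | lo;
+}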
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
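+
+/* Iteration sketch (illustrative only): each parameterized register above
+ * pairs with a *_MAX_INDEX bound. Assuming the rd32() accessor, summing
+ * the per-VF UDP TX low counters would look like:
+ *
+ *	u32 i, total = 0;
+ *
+ *	for (i = 0; i <= I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX; i++)
+ *		total += rd32(hw, I40E_GLPES_VFUDPTXPKTSLO(i));
+ */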
+#define I40E_GLPM_DMACR 0x000881F4
+#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
+#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
+#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
+#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
+#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
+#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
+#define I40E_GLPM_LTRC 0x000BE500
+#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
+#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
+#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
+#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
+#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
+#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
+#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
+#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
+#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_HPTC 0x000AC800
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
+#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
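+
+/* Field-update sketch (illustrative only): the _SHIFT/_MASK pairs above
+ * are pre-shifted, so a read-modify-write of a field, e.g. FDBEST given a
+ * field value fdbest, assuming the rd32()/wr32() accessors, is:
+ *
+ *	u32 val = rd32(hw, I40E_GLQF_CTL);
+ *
+ *	val &= ~I40E_GLQF_CTL_FDBEST_MASK;
+ *	val |= (fdbest << I40E_GLQF_CTL_FDBEST_SHIFT) &
+ *	       I40E_GLQF_CTL_FDBEST_MASK;
+ *	wr32(hw, I40E_GLQF_CTL, val);
+ */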
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
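+
+/* Addressing sketch (illustrative only): for two-parameter registers such
+ * as I40E_VFQF_HKEY1(_i, _VF), _i selects a 1024-byte plane holding one
+ * 32-bit key word per VF, and _VF selects that VF's 4-byte slot within
+ * the plane. Writing key word 3 for VF 7 (hypothetical values), assuming
+ * the wr32() accessor:
+ *
+ *	wr32(hw, I40E_VFQF_HKEY1(3, 7), key_word);
+ */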
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
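+/* Usage sketch (illustrative, not part of this patch): the *CH/*CL pairs
+ * above hold a single 48-bit statistics counter, with the low 32 bits in
+ * the *CL register and the upper 16 bits in *CH.  A driver would combine
+ * them roughly as follows, e.g. for port 0 good octets received:
+ *
+ *	lo = rd32(hw, I40E_GLPRT_GORCL(0));
+ *	hi = rd32(hw, I40E_GLPRT_GORCH(0)) & I40E_GLPRT_GORCH_GORCH_MASK;
+ *	total = ((u64)hi << 32) | lo;
+ *
+ * rd32() is assumed here to be the driver's register-read helper.
+ */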
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
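+/* Usage sketch (illustrative only): the two registers above expose a
+ * 64-bit timer value split into low and high halves; a reader would
+ * combine them as
+ *
+ *	lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ *	hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+ *	now = ((u64)hi << 32) | lo;
+ *
+ * assuming rd32() is the driver's register-read helper.
+ */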
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
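+/* Usage sketch (illustrative only): a malicious-driver-detection event
+ * would be decoded by checking the VALID bit and then extracting the
+ * fields with the shift/mask pairs above, e.g.
+ *
+ *	reg = rd32(hw, I40E_GL_MDET_TX);
+ *	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ *		func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) >>
+ *		       I40E_GL_MDET_TX_FUNCTION_SHIFT;
+ *		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ *			I40E_GL_MDET_TX_QUEUE_SHIFT;
+ *	}
+ *
+ * rd32() is assumed here to be the driver's register-read helper.
+ */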
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
+#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
+#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
+#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
+#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
+#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
+#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
+#define I40E_PFPM_PROXYFC 0x00245A80
+#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
+#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
+#define I40E_PFPM_PROXYFC_EX_SHIFT 1
+#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
+#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_SHIFT 9
+#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
+#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
+#define I40E_PFPM_PROXYS 0x00245B80
+#define I40E_PFPM_PROXYS_EX_SHIFT 1
+#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_SHIFT 4
+#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_NS_SHIFT 9
+#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
+#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_MLD_SHIFT 12
+#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
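+/* Usage sketch (illustrative only): a VF driver would poll its reset
+ * state via
+ *
+ *	rstat = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *
+ * assuming rd32() is the driver's register-read helper.
+ */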
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
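+/* Usage sketch (illustrative only): a VF driver notifies hardware of new
+ * descriptors by writing the next-to-use index to the tail register, e.g.
+ *
+ *	wr32(hw, I40E_QTX_TAIL1(queue), next_to_use);
+ *
+ * wr32() is assumed here to be the driver's register-write helper.
+ */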
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
new file mode 100644
index 0000000..5e5bcdd
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+	I40E_SUCCESS				= 0,
+	I40E_ERR_NVM				= -1,
+	I40E_ERR_NVM_CHECKSUM			= -2,
+	I40E_ERR_PHY				= -3,
+	I40E_ERR_CONFIG				= -4,
+	I40E_ERR_PARAM				= -5,
+	I40E_ERR_MAC_TYPE			= -6,
+	I40E_ERR_UNKNOWN_PHY			= -7,
+	I40E_ERR_LINK_SETUP			= -8,
+	I40E_ERR_ADAPTER_STOPPED		= -9,
+	I40E_ERR_INVALID_MAC_ADDR		= -10,
+	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
+	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
+	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
+	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
+	I40E_ERR_RESET_FAILED			= -15,
+	I40E_ERR_SWFW_SYNC			= -16,
+	I40E_ERR_NO_AVAILABLE_VSI		= -17,
+	I40E_ERR_NO_MEMORY			= -18,
+	I40E_ERR_BAD_PTR			= -19,
+	I40E_ERR_RING_FULL			= -20,
+	I40E_ERR_INVALID_PD_ID			= -21,
+	I40E_ERR_INVALID_QP_ID			= -22,
+	I40E_ERR_INVALID_CQ_ID			= -23,
+	I40E_ERR_INVALID_CEQ_ID			= -24,
+	I40E_ERR_INVALID_AEQ_ID			= -25,
+	I40E_ERR_INVALID_SIZE			= -26,
+	I40E_ERR_INVALID_ARP_INDEX		= -27,
+	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
+	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
+	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
+	I40E_ERR_INVALID_FRAG_COUNT		= -31,
+	I40E_ERR_QUEUE_EMPTY			= -32,
+	I40E_ERR_INVALID_ALIGNMENT		= -33,
+	I40E_ERR_FLUSHED_QUEUE			= -34,
+	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
+	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
+	I40E_ERR_TIMEOUT			= -37,
+	I40E_ERR_OPCODE_MISMATCH		= -38,
+	I40E_ERR_CQP_COMPL_ERROR		= -39,
+	I40E_ERR_INVALID_VF_ID			= -40,
+	I40E_ERR_INVALID_HMCFN_ID		= -41,
+	I40E_ERR_BACKING_PAGE_ERROR		= -42,
+	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
+	I40E_ERR_INVALID_PBLE_INDEX		= -44,
+	I40E_ERR_INVALID_SD_INDEX		= -45,
+	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
+	I40E_ERR_INVALID_SD_TYPE		= -47,
+	I40E_ERR_MEMCPY_FAILED			= -48,
+	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
+	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
+	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
+	I40E_ERR_SRQ_ENABLED			= -52,
+	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
+	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
+	I40E_ERR_BUF_TOO_SHORT			= -55,
+	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
+	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
+	I40E_ERR_BAD_IWARP_CQE			= -58,
+	I40E_ERR_NVM_BLANK_MODE			= -59,
+	I40E_ERR_NOT_IMPLEMENTED		= -60,
+	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
+	I40E_ERR_DIAG_TEST_FAILED		= -62,
+	I40E_ERR_NOT_READY			= -63,
+	I40E_NOT_SUPPORTED			= -64,
+	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
new file mode 100644
index 0000000..49d2cfa
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -0,0 +1,1817 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
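+/* pack the command, offset, buffer size and L2 tag fields into the
+ * cmd_type_offset_bsz quad word of a Tx data descriptor
+ */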
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+				u32 td_tag)
+{
+	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
+			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/**
+ * i40e_program_fdir_filter - Program a Flow Director filter
+ * @fdir_data: Packet data that will be filter parameters
+ * @pf: The pf pointer
+ * @add: True for add/update, False for remove
+ **/
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+			     struct i40e_pf *pf, bool add)
+{
+	struct i40e_filter_program_desc *fdir_desc;
+	struct i40e_tx_buffer *tx_buf;
+	struct i40e_tx_desc *tx_desc;
+	struct i40e_ring *tx_ring;
+	struct i40e_vsi *vsi;
+	struct device *dev;
+	dma_addr_t dma;
+	u32 td_cmd = 0;
+	u16 i;
+
+	/* find existing FDIR VSI */
+	vsi = NULL;
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+			vsi = pf->vsi[i];
+	if (!vsi)
+		return -ENOENT;
+
+	tx_ring = &vsi->tx_rings[0];
+	dev = tx_ring->dev;
+
+	dma = dma_map_single(dev, fdir_data->raw_packet,
+				I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma))
+		goto dma_fail;
+
+	/* grab the next descriptor */
+	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+	tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+	tx_ring->next_to_use++;
+	if (tx_ring->next_to_use == tx_ring->count)
+		tx_ring->next_to_use = 0;
+
+	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
+					     << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+					     & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
+					    << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+					    & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
+					     << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+					     & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	/* Use LAN VSI Id if not programmed by user */
+	if (fdir_data->dest_vsi == 0)
+		fdir_desc->qindex_flex_ptype_vsi |=
+					  cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
+					   << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+	else
+		fdir_desc->qindex_flex_ptype_vsi |=
+					    cpu_to_le32((fdir_data->dest_vsi
+					    << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+					    & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdir_desc->dtype_cmd_cntindex =
+				    cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+				       I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
+					<< I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+					   I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
+					   << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
+					  << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+					  & I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+		     (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+		      & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	if (fdir_data->cnt_index != 0) {
+		fdir_desc->dtype_cmd_cntindex |=
+				    cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+		fdir_desc->dtype_cmd_cntindex |=
+					    cpu_to_le32((fdir_data->cnt_index
+					    << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+					    & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+	}
+
+	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+
+	/* Now program a dummy descriptor */
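+	/* The dummy data descriptor below carries the raw filter packet,
+	 * which serves as the template for the filter fields being
+	 * programmed.
+	 */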
+	tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
+	tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+	tx_ring->next_to_use++;
+	if (tx_ring->next_to_use == tx_ring->count)
+		tx_ring->next_to_use = 0;
+
+	tx_desc->buffer_addr = cpu_to_le64(dma);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	tx_desc->cmd_type_offset_bsz =
+		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+
+	/* Mark the data descriptor to be watched */
+	tx_buf->next_to_watch = tx_desc;
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	writel(tx_ring->next_to_use, tx_ring->tail);
+	return 0;
+
+dma_fail:
+	return -1;
+}
+
+/**
+ * i40e_fd_handle_status - check the Programming Status for FD
+ * @rx_ring: the Rx ring for this descriptor
+ * @qw: the descriptor data
+ * @prog_id: the id originally used for programming
+ *
+ * This is used to verify whether the FD programming or invalidation
+ * requested by SW of the HW succeeded, and to take action accordingly.
+ **/
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+{
+	struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+	u32 error;
+
+	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+	/* for now just print the Status */
+	dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
+		 prog_id, error);
+}
+
+/**
+ * i40e_unmap_tx_resource - Release a Tx buffer
+ * @ring:      the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
+					  struct i40e_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->dma) {
+		if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
+			dma_unmap_page(ring->dev,
+				       tx_buffer->dma,
+				       tx_buffer->length,
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(ring->dev,
+					 tx_buffer->dma,
+					 tx_buffer->length,
+					 DMA_TO_DEVICE);
+	}
+	tx_buffer->dma = 0;
+	tx_buffer->time_stamp = 0;
+}
+
+/**
+ * i40e_clean_tx_ring - Free all Tx ring buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	struct i40e_tx_buffer *tx_buffer;
+	unsigned long bi_size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_bi)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer = &tx_ring->tx_bi[i];
+		i40e_unmap_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer->skb)
+			dev_kfree_skb_any(tx_buffer->skb);
+		tx_buffer->skb = NULL;
+	}
+
+	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_bi, 0, bi_size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ * i40e_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40e_free_tx_resources(struct i40e_ring *tx_ring)
+{
+	i40e_clean_tx_ring(tx_ring);
+	kfree(tx_ring->tx_bi);
+	tx_ring->tx_bi = NULL;
+
+	if (tx_ring->desc) {
+		dma_free_coherent(tx_ring->dev, tx_ring->size,
+				  tx_ring->desc, tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
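+	/* if next_to_use has wrapped past next_to_clean, add the ring
+	 * size so the subtraction below still yields the count of
+	 * descriptors not yet cleaned
+	 */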
+	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+			? ring->next_to_use
+			: ring->next_to_use + ring->count);
+	return ntu - ring->next_to_clean;
+}
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+	u32 tx_pending = i40e_get_tx_pending(tx_ring);
+	bool ret = false;
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/* Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang. The
+	 * bit is cleared if a pause frame is received to remove
+	 * false hang detection due to PFC or 802.3x frames. By
+	 * requiring this to fail twice we avoid races with
+	 * PFC clearing the ARMED bit and conditions where we
+	 * run the check_tx_hang logic with a transmit completion
+	 * pending but without time to complete it yet.
+	 */
+	if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+	    tx_pending) {
+		/* make sure it is true for two checks in a row */
+		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+				       &tx_ring->state);
+	} else {
+		/* update completed stats and disarm the hang check */
+		tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring:  tx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+	u16 i = tx_ring->next_to_clean;
+	struct i40e_tx_buffer *tx_buf;
+	struct i40e_tx_desc *tx_desc;
+	unsigned int total_packets = 0;
+	unsigned int total_bytes = 0;
+
+	tx_buf = &tx_ring->tx_bi[i];
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+
+	for (; budget; budget--) {
+		struct i40e_tx_desc *eop_desc;
+
+		eop_desc = tx_buf->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* if the descriptor isn't done, no work yet to do */
+		if (!(eop_desc->cmd_type_offset_bsz &
+		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+			break;
+
+		/* count the packet as being completed */
+		tx_ring->tx_stats.completed++;
+		tx_buf->next_to_watch = NULL;
+		tx_buf->time_stamp = 0;
+
+		/* ensure the descriptor fields are only read after the
+		 * done bit of eop_desc has been verified above
+		 */
+		rmb();
+
+		do {
+			i40e_unmap_tx_resource(tx_ring, tx_buf);
+
+			/* clear dtype status */
+			tx_desc->cmd_type_offset_bsz &=
+				~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+
+			if (likely(tx_desc == eop_desc)) {
+				eop_desc = NULL;
+
+				dev_kfree_skb_any(tx_buf->skb);
+				tx_buf->skb = NULL;
+
+				total_bytes += tx_buf->bytecount;
+				total_packets += tx_buf->gso_segs;
+			}
+
+			tx_buf++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buf = tx_ring->tx_bi;
+				tx_desc = I40E_TX_DESC(tx_ring, 0);
+			}
+		} while (eop_desc);
+	}
+
+	tx_ring->next_to_clean = i;
+	tx_ring->tx_stats.bytes += total_bytes;
+	tx_ring->tx_stats.packets += total_packets;
+	tx_ring->q_vector->tx.total_bytes += total_bytes;
+	tx_ring->q_vector->tx.total_packets += total_packets;
+	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+		/* schedule immediate reset if we believe we hung */
+		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+			 "  VSI                  <%d>\n"
+			 "  Tx Queue             <%d>\n"
+			 "  next_to_use          <%x>\n"
+			 "  next_to_clean        <%x>\n",
+			 tx_ring->vsi->seid,
+			 tx_ring->queue_index,
+			 tx_ring->next_to_use, i);
+		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+			 "  time_stamp           <%lx>\n"
+			 "  jiffies              <%lx>\n",
+			 tx_ring->tx_bi[i].time_stamp, jiffies);
+
+		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+		dev_info(tx_ring->dev,
+			 "tx hang detected on queue %d, resetting adapter\n",
+			 tx_ring->queue_index);
+
+		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+		/* the adapter is about to reset, no point in enabling stuff */
+		return true;
+	}
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
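+	/* only wake the queue once at least TX_WAKE_THRESHOLD descriptors
+	 * are free, which adds hysteresis so the queue is not stopped and
+	 * restarted for every few reclaimed descriptors
+	 */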
+	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
+	return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt.  The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern.  Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+	enum i40e_latency_range new_latency_range = rc->latency_range;
+	u32 new_itr = rc->itr;
+	int bytes_per_int;
+
+	if (rc->total_packets == 0 || !rc->itr)
+		return;
+
+	/* simple throttlerate management
+	 *   0-10MB/s   lowest (100000 ints/s)
+	 *  10-20MB/s   low    (20000 ints/s)
+	 *  20-1249MB/s bulk   (8000 ints/s)
+	 */
+	bytes_per_int = rc->total_bytes / rc->itr;
+	switch (new_latency_range) {
+	case I40E_LOWEST_LATENCY:
+		if (bytes_per_int > 10)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	case I40E_LOW_LATENCY:
+		if (bytes_per_int > 20)
+			new_latency_range = I40E_BULK_LATENCY;
+		else if (bytes_per_int <= 10)
+			new_latency_range = I40E_LOWEST_LATENCY;
+		break;
+	case I40E_BULK_LATENCY:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	}
+	rc->latency_range = new_latency_range;
+
+	switch (new_latency_range) {
+	case I40E_LOWEST_LATENCY:
+		new_itr = I40E_ITR_100K;
+		break;
+	case I40E_LOW_LATENCY:
+		new_itr = I40E_ITR_20K;
+		break;
+	case I40E_BULK_LATENCY:
+		new_itr = I40E_ITR_8K;
+		break;
+	default:
+		break;
+	}
+
+	if (new_itr != rc->itr) {
+		/* do an exponential smoothing */
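+		/* the blend weights the old value over the new roughly 9:1
+		 * in the harmonic domain, so the interrupt rate ramps
+		 * gradually instead of jumping to the new latency range
+		 */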
+		new_itr = (10 * new_itr * rc->itr) /
+			  ((9 * new_itr) + rc->itr);
+		rc->itr = new_itr & I40E_MAX_ITR;
+	}
+
+	rc->total_bytes = 0;
+	rc->total_packets = 0;
+}
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+	struct i40e_hw *hw = &q_vector->vsi->back->hw;
+	u32 reg_addr;
+	u16 old_itr;
+
+	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
+	old_itr = q_vector->rx.itr;
+	i40e_set_new_dynamic_itr(&q_vector->rx);
+	if (old_itr != q_vector->rx.itr)
+		wr32(hw, reg_addr, q_vector->rx.itr);
+
+	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
+	old_itr = q_vector->tx.itr;
+	i40e_set_new_dynamic_itr(&q_vector->tx);
+	if (old_itr != q_vector->tx.itr)
+		wr32(hw, reg_addr, q_vector->tx.itr);
+
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_clean_programming_status - clean the programming status descriptor
+ * @rx_ring: the rx ring that has this descriptor
+ * @rx_desc: the rx descriptor written back by HW
+ *
+ * Flow director should handle FD_FILTER_STATUS to check its filter programming
+ * status being successful or not and take actions accordingly. FCoE should
+ * handle its context/filter programming/invalidation status and take actions.
+ *
+ **/
+static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
+					  union i40e_rx_desc *rx_desc)
+{
+	u64 qw;
+	u8 id;
+
+	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
+		i40e_fd_handle_status(rx_ring, qw, id);
+}
+
+/**
+ * i40e_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int bi_size;
+
+	if (!dev)
+		return -ENOMEM;
+
+	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+	if (!tx_ring->tx_bi)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc) {
+		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+			 tx_ring->size);
+		goto err;
+	}
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	return 0;
+
+err:
+	kfree(tx_ring->tx_bi);
+	tx_ring->tx_bi = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * i40e_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	struct i40e_rx_buffer *rx_bi;
+	unsigned long bi_size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_ring->rx_bi)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		rx_bi = &rx_ring->rx_bi[i];
+		if (rx_bi->dma) {
+			dma_unmap_single(dev,
+					 rx_bi->dma,
+					 rx_ring->rx_buf_len,
+					 DMA_FROM_DEVICE);
+			rx_bi->dma = 0;
+		}
+		if (rx_bi->skb) {
+			dev_kfree_skb(rx_bi->skb);
+			rx_bi->skb = NULL;
+		}
+		if (rx_bi->page) {
+			if (rx_bi->page_dma) {
+				dma_unmap_page(dev,
+					       rx_bi->page_dma,
+					       PAGE_SIZE / 2,
+					       DMA_FROM_DEVICE);
+				rx_bi->page_dma = 0;
+			}
+			__free_page(rx_bi->page);
+			rx_bi->page = NULL;
+			rx_bi->page_offset = 0;
+		}
+	}
+
+	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_bi, 0, bi_size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40e_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+{
+	i40e_clean_rx_ring(rx_ring);
+	kfree(rx_ring->rx_bi);
+	rx_ring->rx_bi = NULL;
+
+	if (rx_ring->desc) {
+		dma_free_coherent(rx_ring->dev, rx_ring->size,
+				  rx_ring->desc, rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+}
+
+/**
+ * i40e_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int bi_size;
+
+	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+	if (!rx_ring->rx_bi)
+		goto err;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+
+	if (!rx_ring->desc) {
+		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+			 rx_ring->size);
+		goto err;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+err:
+	kfree(rx_ring->rx_bi);
+	rx_ring->rx_bi = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail value
+ * @rx_ring: ring to bump
+ * @val: new next_to_use value to write to the tail register
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+	rx_ring->next_to_use = val;
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+	u16 i = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+	struct sk_buff *skb;
+
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev || !cleaned_count)
+		return;
+
+	while (cleaned_count--) {
+		rx_desc = I40E_RX_DESC(rx_ring, i);
+		bi = &rx_ring->rx_bi[i];
+		skb = bi->skb;
+
+		if (!skb) {
+			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+							rx_ring->rx_buf_len);
+			if (!skb) {
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
+				goto no_buffers;
+			}
+			/* initialize queue mapping */
+			skb_record_rx_queue(skb, rx_ring->queue_index);
+			bi->skb = skb;
+		}
+
+		if (!bi->dma) {
+			bi->dma = dma_map_single(rx_ring->dev,
+						 skb->data,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
+				bi->dma = 0;
+				goto no_buffers;
+			}
+		}
+
+		if (ring_is_ps_enabled(rx_ring)) {
+			if (!bi->page) {
+				bi->page = alloc_page(GFP_ATOMIC);
+				if (!bi->page) {
+					rx_ring->rx_stats.alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+			}
+
+			if (!bi->page_dma) {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= PAGE_SIZE / 2;
+				bi->page_dma = dma_map_page(rx_ring->dev,
+							    bi->page,
+							    bi->page_offset,
+							    PAGE_SIZE / 2,
+							    DMA_FROM_DEVICE);
+				if (dma_mapping_error(rx_ring->dev,
+						      bi->page_dma)) {
+					rx_ring->rx_stats.alloc_rx_page_failed++;
+					bi->page_dma = 0;
+					goto no_buffers;
+				}
+			}
+
+			/* Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info.
+			 */
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+		} else {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+			rx_desc->read.hdr_addr = 0;
+		}
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring:  rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+			     struct sk_buff *skb, u16 vlan_tag)
+{
+	struct i40e_q_vector *q_vector = rx_ring->q_vector;
+	struct i40e_vsi *vsi = rx_ring->vsi;
+	u64 flags = vsi->back->flags;
+
+	if (vlan_tag & VLAN_VID_MASK)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+	if (flags & I40E_FLAG_IN_NETPOLL)
+		netif_rx(skb);
+	else
+		napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+				    struct sk_buff *skb,
+				    u32 rx_status,
+				    u32 rx_error)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Rx csum enabled and ip headers found? */
+	if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+		return;
+
+	/* IP or L4 checksum error */
+	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+		vsi->back->hw_csum_rx_error++;
+		return;
+	}
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+			       union i40e_rx_desc *rx_desc)
+{
+	if (ring->netdev->features & NETIF_F_RXHASH) {
+		if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
+		     I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+		    I40E_RX_DESC_FLTSTAT_RSS_HASH)
+			return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+	}
+	return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring:  rx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	const int current_node = numa_node_id();
+	struct i40e_vsi *vsi = rx_ring->vsi;
+	u16 i = rx_ring->next_to_clean;
+	union i40e_rx_desc *rx_desc;
+	u32 rx_error, rx_status;
+	u64 qword;
+
+	rx_desc = I40E_RX_DESC(rx_ring, i);
+	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+				>> I40E_RXD_QW1_STATUS_SHIFT;
+
+	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+		union i40e_rx_desc *next_rxd;
+		struct i40e_rx_buffer *rx_bi;
+		struct sk_buff *skb;
+		u16 vlan_tag;
+		if (i40e_rx_is_programming_status(qword)) {
+			i40e_clean_programming_status(rx_ring, rx_desc);
+			I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+			goto next_desc;
+		}
+		rx_bi = &rx_ring->rx_bi[i];
+		skb = rx_bi->skb;
+		prefetch(skb->data);
+
+		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+					      >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
+					      >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
+					      >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
+					      >> I40E_RXD_QW1_ERROR_SHIFT;
+		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+		rx_bi->skb = NULL;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * STATUS_DD bit is set
+		 */
+		rmb();
+
+		/* Get the header and possibly the whole packet.
+		 * If this is an skb from a previous receive, dma will be 0.
+		 */
+		if (rx_bi->dma) {
+			u16 len;
+
+			if (rx_hbo)
+				len = I40E_RX_HDR_SIZE;
+			else if (rx_sph)
+				len = rx_header_len;
+			else if (rx_packet_len)
+				len = rx_packet_len;   /* 1buf/no split found */
+			else
+				len = rx_header_len;   /* split always mode */
+
+			skb_put(skb, len);
+			dma_unmap_single(rx_ring->dev,
+					 rx_bi->dma,
+					 rx_ring->rx_buf_len,
+					 DMA_FROM_DEVICE);
+			rx_bi->dma = 0;
+		}
+
+		/* Get the rest of the data if this was a header split */
+		if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+					   rx_bi->page,
+					   rx_bi->page_offset,
+					   rx_packet_len);
+
+			skb->len += rx_packet_len;
+			skb->data_len += rx_packet_len;
+			skb->truesize += rx_packet_len;
+
+			if ((page_count(rx_bi->page) == 1) &&
+			    (page_to_nid(rx_bi->page) == current_node))
+				get_page(rx_bi->page);
+			else
+				rx_bi->page = NULL;
+
+			dma_unmap_page(rx_ring->dev,
+				       rx_bi->page_dma,
+				       PAGE_SIZE / 2,
+				       DMA_FROM_DEVICE);
+			rx_bi->page_dma = 0;
+		}
+		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+		if (unlikely(
+		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+			struct i40e_rx_buffer *next_buffer;
+
+			next_buffer = &rx_ring->rx_bi[i];
+
+			if (ring_is_ps_enabled(rx_ring)) {
+				rx_bi->skb = next_buffer->skb;
+				rx_bi->dma = next_buffer->dma;
+				next_buffer->skb = skb;
+				next_buffer->dma = 0;
+			}
+			rx_ring->rx_stats.non_eop_descs++;
+			goto next_desc;
+		}
+
+		/* ERR_MASK will only have valid bits if EOP set */
+		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+			dev_kfree_skb_any(skb);
+			goto next_desc;
+		}
+
+		skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+		i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+			 : 0;
+		i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+		rx_ring->netdev->last_rx = jiffies;
+		budget--;
+next_desc:
+		rx_desc->wb.qword1.status_error_len = 0;
+		if (!budget)
+			break;
+
+		cleaned_count++;
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+						>> I40E_RXD_QW1_STATUS_SHIFT;
+	}
+
+	rx_ring->next_to_clean = i;
+	rx_ring->rx_stats.packets += total_rx_packets;
+	rx_ring->rx_stats.bytes += total_rx_bytes;
+	rx_ring->q_vector->rx.total_packets += total_rx_packets;
+	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (cleaned_count)
+		i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+
+	return budget > 0;
+}
+
+/**
+ * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40e_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct i40e_q_vector *q_vector =
+			       container_of(napi, struct i40e_q_vector, napi);
+	struct i40e_vsi *vsi = q_vector->vsi;
+	bool clean_complete = true;
+	int budget_per_ring;
+	int i;
+
+	if (test_bit(__I40E_DOWN, &vsi->state)) {
+		napi_complete(napi);
+		return 0;
+	}
+
+	/* We attempt to distribute budget to each Rx queue fairly, but don't
+	 * allow the budget to go below 1 because that would exit polling early.
+	 * Since the actual Tx work is minimal, we can give the Tx a larger
+	 * budget and be more aggressive about cleaning up the Tx descriptors.
+	 */
+	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
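+	/* e.g. a NAPI budget of 64 split across 4 ring pairs gives each
+	 * Rx ring a budget of 16 packets per poll
+	 */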
+	for (i = 0; i < q_vector->num_ringpairs; i++) {
+		clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
+						    vsi->work_limit);
+		clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
+						    budget_per_ring);
+	}
+
+	/* If work not completed, return budget and polling will return */
+	if (!clean_complete)
+		return budget;
+
+	/* Work is done so exit the polling mode and re-enable the interrupt */
+	napi_complete(napi);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+		i40e_update_dynamic_itr(q_vector);
+
+	if (!test_bit(__I40E_DOWN, &vsi->state)) {
+		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+			i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+		} else {
+			struct i40e_hw *hw = &vsi->back->hw;
+			/* Re-enable the queue 0 interrupt cause; there is
+			 * no need for dynamic enabling because it was left
+			 * on for the other possible interrupts during napi
+			 */
+			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+			wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+			qval = rd32(hw, I40E_QINT_TQCTL(0));
+			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+			wr32(hw, I40E_QINT_TQCTL(0), qval);
+			i40e_flush(hw);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_atr - Add a Flow Director ATR filter
+ * @tx_ring:  ring to add programming descriptor to
+ * @skb:      send buffer
+ * @flags:    send flags
+ * @protocol: wire protocol
+ **/
+static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
+		     u32 flags, __be16 protocol)
+{
+	struct i40e_filter_program_desc *fdir_desc;
+	struct i40e_pf *pf = tx_ring->vsi->back;
+	union {
+		unsigned char *network;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	struct tcphdr *th;
+	unsigned int hlen;
+	u32 flex_ptype, dtype_cmd;
+
+	/* make sure ATR is enabled */
+	if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+		return;
+
+	/* if sampling is disabled do nothing */
+	if (!tx_ring->atr_sample_rate)
+		return;
+
+	tx_ring->atr_count++;
+
+	/* snag network header to get L4 type and address */
+	hdr.network = skb_network_header(skb);
+
+	/* Currently only IPv4/IPv6 with TCP is supported */
+	if (protocol == htons(ETH_P_IP)) {
+		if (hdr.ipv4->protocol != IPPROTO_TCP)
+			return;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+			return;
+
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return;
+	}
+
+	th = (struct tcphdr *)(hdr.network + hlen);
+
+	/* sample on all syn/fin packets or once every atr sample rate */
+	if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+		return;
+
+	tx_ring->atr_count = 0;
+
+	/* grab the next descriptor */
+	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+	tx_ring->next_to_use++;
+	if (tx_ring->next_to_use == tx_ring->count)
+		tx_ring->next_to_use = 0;
+
+	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
+	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+	dtype_cmd |= th->fin ?
+		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags:   the tx flags to be set
+ *
+ * Checks the skb and set up correspondingly several generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+				      struct i40e_ring *tx_ring,
+				      u32 *flags)
+{
+	__be16 protocol = skb->protocol;
+	u32  tx_flags = 0;
+
+	/* if we have a HW VLAN tag being added, default to the HW one */
+	if (vlan_tx_tag_present(skb)) {
+		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+	/* else if it is a SW VLAN, check the next protocol and store the tag */
+	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			return -EINVAL;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+	}
+
+	/* Insert 802.1p priority into VLAN header */
+	if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
+	    ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+	     (skb->priority != TC_PRIO_CONTROL))) {
+		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
+		tx_flags |= (skb->priority & 0x7) <<
+				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
+		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
+			struct vlan_ethhdr *vhdr;
+			if (skb_header_cloned(skb) &&
+			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+				return -ENOMEM;
+			vhdr = (struct vlan_ethhdr *)skb->data;
+			vhdr->h_vlan_TCI = htons(tx_flags >>
+						 I40E_TX_FLAGS_VLAN_SHIFT);
+		} else {
+			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+		}
+	}
+	*flags = tx_flags;
+	return 0;
+}
+
+/**
+ * i40e_tx_csum - is checksum offload requested
+ * @tx_ring:  ptr to the ring to send
+ * @skb:      ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ *
+ * Returns true if checksum offload is requested
+ **/
+static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			 u32 tx_flags, __be16 protocol)
+{
+	if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
+	    !(tx_flags & I40E_TX_FLAGS_TXSW)) {
+		if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
+			return false;
+	}
+
+	return skb->ip_summed == CHECKSUM_PARTIAL;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring:  ptr to the ring to send
+ * @skb:      ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len:  ptr to the size of the packet header
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
+		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct tcphdr *tcph;
+	struct iphdr *iph;
+	u32 l4len;
+	int err;
+	struct ipv6hdr *ipv6h;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
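+		/* zero the length fields and pre-seed the TCP checksum with
+		 * the pseudo-header (length 0) so the hardware can fill in
+		 * the final per-segment values during segmentation
+		 */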
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+						 0, IPPROTO_TCP, 0);
+	} else if (skb_is_gso_v6(skb)) {
+
+		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+					   : ipv6_hdr(skb);
+		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+		ipv6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+					       0, IPPROTO_TCP, 0);
+	}
+
+	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+	*hdr_len = (skb->encapsulation
+		    ? (skb_inner_transport_header(skb) - skb->data)
+		    : skb_transport_offset(skb)) + l4len;
+
+	/* find the field values */
+	cd_cmd = I40E_TX_CTX_DESC_TSO;
+	cd_tso_len = skb->len - *hdr_len;
+	cd_mss = skb_shinfo(skb)->gso_size;
+	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
+			     | ((u64)cd_tso_len
+				<< I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+			     | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+				u32 *td_cmd, u32 *td_offset,
+				struct i40e_ring *tx_ring,
+				u32 *cd_tunneling)
+{
+	struct ipv6hdr *this_ipv6_hdr;
+	unsigned int this_tcp_hdrlen;
+	struct iphdr *this_ip_hdr;
+	u32 network_hdr_len;
+	u8 l4_hdr = 0;
+
+	if (skb->encapsulation) {
+		network_hdr_len = skb_inner_network_header_len(skb);
+		this_ip_hdr = inner_ip_hdr(skb);
+		this_ipv6_hdr = inner_ipv6_hdr(skb);
+		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+		if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+			if (tx_flags & I40E_TX_FLAGS_TSO) {
+				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+				ip_hdr(skb)->check = 0;
+			} else {
+				*cd_tunneling |=
+					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+			}
+		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+			/* IPv6 has no header checksum, so the IPv6 outer
+			 * header type is used for both the TSO and non-TSO
+			 * cases
+			 */
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		}
+
+		/* Now set the ctx descriptor fields */
+		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+					I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+				   I40E_TXD_CTX_UDP_TUNNELING            |
+				   ((skb_inner_network_offset(skb) -
+					skb_transport_offset(skb)) >> 1) <<
+				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+	} else {
+		network_hdr_len = skb_network_header_len(skb);
+		this_ip_hdr = ip_hdr(skb);
+		this_ipv6_hdr = ipv6_hdr(skb);
+		this_tcp_hdrlen = tcp_hdrlen(skb);
+	}
+
+	/* Enable IP checksum offloads */
+	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+		l4_hdr = this_ip_hdr->protocol;
+		/* the stack computes the IP header already, the only time we
+		 * need the hardware to recompute it is in the case of TSO.
+		 */
+		if (tx_flags & I40E_TX_FLAGS_TSO) {
+			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+			this_ip_hdr->check = 0;
+		} else {
+			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+		}
+		/* Now set the td_offset for IP header length */
+		*td_offset = (network_hdr_len >> 2) <<
+			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		l4_hdr = this_ipv6_hdr->nexthdr;
+		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+		/* Now set the td_offset for IP header length */
+		*td_offset = (network_hdr_len >> 2) <<
+			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+	*td_offset |= (skb_network_offset(skb) >> 1) <<
+		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L4 checksum offloads */
+	switch (l4_hdr) {
+	case IPPROTO_TCP:
+		/* enable checksum offloads */
+		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+		*td_offset |= (this_tcp_hdrlen >> 2) <<
+			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_SCTP:
+		/* enable SCTP checksum offload */
+		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
+			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_UDP:
+		/* enable UDP checksum offload */
+		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+		*td_offset |= (sizeof(struct udphdr) >> 2) <<
+			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring:  ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+			       const u64 cd_type_cmd_tso_mss,
+			       const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+	struct i40e_tx_context_desc *context_desc;
+
+	if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+		return;
+
+	/* grab the next descriptor */
+	context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
+	tx_ring->next_to_use++;
+	if (tx_ring->next_to_use == tx_ring->count)
+		tx_ring->next_to_use = 0;
+
+	/* cpu_to_le32 and assign to struct fields */
+	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @first:    first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ * @td_cmd:   the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			struct i40e_tx_buffer *first, u32 tx_flags,
+			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	struct device *dev = tx_ring->dev;
+	u32 paylen = skb->len - hdr_len;
+	u16 i = tx_ring->next_to_use;
+	struct i40e_tx_buffer *tx_bi;
+	struct i40e_tx_desc *tx_desc;
+	u32 buf_offset = 0;
+	u32 td_tag = 0;
+	dma_addr_t dma;
+	u16 gso_segs;
+
+	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma))
+		goto dma_error;
+
+	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+			 I40E_TX_FLAGS_VLAN_SHIFT;
+	}
+
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+	for (;;) {
+		while (size > I40E_MAX_DATA_PER_TXD) {
+			tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+			tx_desc->cmd_type_offset_bsz =
+				build_ctob(td_cmd, td_offset,
+					   I40E_MAX_DATA_PER_TXD, td_tag);
+
+			buf_offset += I40E_MAX_DATA_PER_TXD;
+			size -= I40E_MAX_DATA_PER_TXD;
+
+			tx_desc++;
+			i++;
+			if (i == tx_ring->count) {
+				tx_desc = I40E_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+		}
+
+		tx_bi = &tx_ring->tx_bi[i];
+		tx_bi->length = buf_offset + size;
+		tx_bi->tx_flags = tx_flags;
+		tx_bi->dma = dma;
+
+		tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+							  size, td_tag);
+
+		if (likely(!data_len))
+			break;
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+		buf_offset = 0;
+		tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
+
+		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma))
+			goto dma_error;
+
+		tx_desc++;
+		i++;
+		if (i == tx_ring->count) {
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		frag++;
+	}
+
+	tx_desc->cmd_type_offset_bsz |=
+		       cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+		gso_segs = skb_shinfo(skb)->gso_segs;
+	else
+		gso_segs = 1;
+
+	/* multiply data chunks by size of headers */
+	tx_bi->bytecount = paylen + (gso_segs * hdr_len);
+	tx_bi->gso_segs = gso_segs;
+	tx_bi->skb = skb;
+
+	/* set the timestamp and next to watch values */
+	first->time_stamp = jiffies;
+	first->next_to_watch = tx_desc;
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	writel(i, tx_ring->tail);
+	return;
+
+dma_error:
+	dev_info(dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_bi map */
+	for (;;) {
+		tx_bi = &tx_ring->tx_bi[i];
+		i40e_unmap_tx_resource(tx_ring, tx_bi);
+		if (tx_bi == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the number of descriptors we want to ensure are available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
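+
+/* Note (explanatory, not from the original source): stopping the queue
+ * before re-checking I40E_DESC_UNUSED() closes the race where the Tx
+ * cleanup path frees descriptors between the caller's first check and the
+ * stop; the smp_mb() is intended to pair with the cleanup side so that
+ * either we see the freed space, or the cleaner sees the stopped queue
+ * and restarts it.
+ */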
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the number of descriptors we want to ensure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb, or 0 to
+ * indicate there are not enough descriptors available in this ring (we
+ * always need at least one data descriptor).
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+				      struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+	unsigned int f;
+#endif
+	int count = 0;
+
+	/* need: PAGE_SIZE/I40E_MAX_DATA_PER_TXD descriptors per page fragment,
+	 *       + 1 desc for skb_headlen()/I40E_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	count += TXD_USE_COUNT(skb_headlen(skb));
+	if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return 0;
+	}
+	return count;
+}
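+
+/* Worked example (illustrative only): on a 4K-page system, a 64KB TSO skb
+ * with a small linear header area and 16 page-sized frags needs
+ * TXD_USE_COUNT(skb_headlen(skb)) = 1 plus 16 frag descriptors, so the
+ * function returns count = 17 after asking i40e_maybe_stop_tx() for
+ * 17 + 3 = 20 free slots (context descriptor plus the 2-descriptor gap).
+ */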
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+					struct i40e_ring *tx_ring)
+{
+	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+	u32 cd_tunneling = 0, cd_l2tag2 = 0;
+	struct i40e_tx_buffer *first;
+	u32 td_offset = 0;
+	u32 tx_flags = 0;
+	__be16 protocol;
+	u32 td_cmd = 0;
+	u8 hdr_len = 0;
+	int tso;
+
+	if (!i40e_xmit_descriptor_count(skb, tx_ring))
+		return NETDEV_TX_BUSY;
+
+	/* prepare the xmit flags */
+	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+		goto out_drop;
+
+	/* obtain protocol of skb */
+	protocol = skb->protocol;
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+	/* setup IPv4/IPv6 offloads */
+	if (protocol == __constant_htons(ETH_P_IP))
+		tx_flags |= I40E_TX_FLAGS_IPV4;
+	else if (protocol == __constant_htons(ETH_P_IPV6))
+		tx_flags |= I40E_TX_FLAGS_IPV6;
+
+	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+		       &cd_type_cmd_tso_mss, &cd_tunneling);
+
+	if (tso < 0)
+		goto out_drop;
+	else if (tso)
+		tx_flags |= I40E_TX_FLAGS_TSO;
+
+	skb_tx_timestamp(skb);
+
+	/* Always offload the checksum, since it's in the data descriptor */
+	if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+		tx_flags |= I40E_TX_FLAGS_CSUM;
+
+	/* always enable offload insertion */
+	td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+	if (tx_flags & I40E_TX_FLAGS_CSUM)
+		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+				    tx_ring, &cd_tunneling);
+
+	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+			   cd_tunneling, cd_l2tag2);
+
+	/* Add Flow Director ATR if it's enabled.
+	 *
+	 * NOTE: this must always be directly before the data descriptor.
+	 */
+	i40e_atr(tx_ring, skb, tx_flags, protocol);
+
+	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		    td_cmd, td_offset);
+
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb:    send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+
+	/* the hardware can't handle really short frames; hardware padding
+	 * only works beyond this point
+	 */
+	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = I40E_MIN_TX_LEN;
+		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+	}
+
+	return i40e_xmit_frame_ring(skb, tx_ring);
+}
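+
+/* Example (illustrative only): a 10-byte runt frame is extended here by
+ * skb_pad() with zeroes so that skb->len and the tail pointer reach
+ * I40E_MIN_TX_LEN (17) before the frame is queued on the ring.
+ */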
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
new file mode 100644
index 0000000..b1d7722
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -0,0 +1,259 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR               0x07FF
+#define I40E_MIN_ITR               0x0001
+#define I40E_ITR_USEC_RESOLUTION   2
+#define I40E_MAX_IRATE             0x03F
+#define I40E_MIN_IRATE             0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K              0x0005
+#define I40E_ITR_20K               0x0019
+#define I40E_ITR_8K                0x003E
+#define I40E_ITR_4K                0x007A
+#define I40E_ITR_RX_DEF            I40E_ITR_8K
+#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
+#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK      256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
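+
+/* Worked example (illustrative only): I40E_ITR_8K is 0x003E (62); at the
+ * 2 usec resolution this is a ~124 usec throttle interval, i.e. roughly
+ * 8000 interrupts per second (hence the name), matching the
+ * 1000000 / (ITR * 2) relation quoted for the rate limits above. A dynamic
+ * default Rx setting would be written as (I40E_ITR_RX_DEF | I40E_ITR_DYNAMIC).
+ */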
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+#define I40E_ITR_NONE  3
+#define I40E_RX_ITR    0
+#define I40E_TX_ITR    1
+#define I40E_PE_ITR    2
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512   512    /* Used for packet split */
+#define I40E_RXBUFFER_2048  2048
+#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096  4096
+#define I40E_RXBUFFER_8192  8192
+#define I40E_MAX_RXBUFFER   9728  /* largest size for a single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n)		\
+	do {					\
+		(i)++;				\
+		if ((i) == (r)->count)		\
+			i = 0;			\
+		(n) = I40E_RX_DESC((r), (i));	\
+	} while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
+	do {						\
+		I40E_RX_NEXT_DESC((r), (i), (n));	\
+		prefetch((n));				\
+	} while (0)
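+
+/* Usage sketch (assumed caller pattern, not taken from this file):
+ *
+ *	rx_desc = I40E_RX_DESC(rx_ring, i);
+ *	while (descriptor is done) {
+ *		... process rx_desc ...
+ *		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, rx_desc);
+ *	}
+ *
+ * i.e. the macros advance the index with wrap-around and prefetch the
+ * next descriptor in one step.
+ */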
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN		17
+#define I40E_MAX_DATA_PER_TXD	16383	/* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
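+
+/* Worked example (illustrative only): with 4K pages,
+ * TXD_USE_COUNT(PAGE_SIZE) = DIV_ROUND_UP(4096, 16383) = 1, so with the
+ * usual MAX_SKB_FRAGS of 17 the worst case DESC_NEEDED is
+ * (17 * 1) + 4 = 21 descriptors.
+ */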
+
+#define I40E_TX_FLAGS_CSUM		(u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
+#define I40E_TX_FLAGS_TXSW		(u32)(1 << 8)
+#define I40E_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 9)
+#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
+#define I40E_TX_FLAGS_VLAN_SHIFT	16
+
+struct i40e_tx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u32 tx_flags;
+	struct i40e_tx_desc *next_to_watch;
+	unsigned int bytecount;
+	u16 gso_segs;
+	u8 mapped_as_page;
+};
+
+struct i40e_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+	dma_addr_t page_dma;
+	unsigned int page_offset;
+};
+
+struct i40e_tx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 completed;
+	u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+};
+
+enum i40e_ring_state_t {
+	__I40E_TX_FDIR_INIT_DONE,
+	__I40E_TX_XPS_INIT_DONE,
+	__I40E_TX_DETECT_HANG,
+	__I40E_HANG_CHECK_ARMED,
+	__I40E_RX_PS_ENABLED,
+	__I40E_RX_LRO_ENABLED,
+	__I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+	test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+	set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+	clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+	set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+	clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+	void *desc;			/* Descriptor ring memory */
+	struct device *dev;		/* Used for DMA mapping */
+	struct net_device *netdev;	/* netdev ring maps to */
+	union {
+		struct i40e_tx_buffer *tx_bi;
+		struct i40e_rx_buffer *rx_bi;
+	};
+	unsigned long state;
+	u16 queue_index;		/* Queue number of ring */
+	u8 dcb_tc;			/* Traffic class of ring */
+	u8 __iomem *tail;
+
+	u16 count;			/* Number of descriptors */
+	u16 reg_idx;			/* HW register index of the ring */
+	u16 rx_hdr_len;
+	u16 rx_buf_len;
+	u8  dtype;
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  1
+#define I40E_RX_DTYPE_HEADER_SPLIT  2
+	u8  hsplit;
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
+
+	/* used in interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u8 atr_sample_rate;
+	u8 atr_count;
+
+	bool ring_active;		/* is ring online or not */
+
+	/* stats structs */
+	union {
+		struct i40e_tx_queue_stats tx_stats;
+		struct i40e_rx_queue_stats rx_stats;
+	};
+
+	unsigned int size;		/* length of descriptor ring in bytes */
+	dma_addr_t dma;			/* physical address of ring */
+
+	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
+	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+	I40E_LOWEST_LATENCY = 0,
+	I40E_LOW_LATENCY = 1,
+	I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+#define I40E_MAX_RINGPAIR_PER_VECTOR 8
+	/* array of pointers to rings */
+	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 count;
+	enum i40e_latency_range latency_range;
+	u16 itr;
+};
+
+void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40e_free_tx_resources(struct i40e_ring *tx_ring);
+void i40e_free_rx_resources(struct i40e_ring *rx_ring);
+int i40e_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
new file mode 100644
index 0000000..f3f22b2
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -0,0 +1,1154 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_SFP_XL710_DEVICE_ID	0x1572
+#define I40E_SFP_X710_DEVICE_ID		0x1573
+#define I40E_QEMU_DEVICE_ID		0x1574
+#define I40E_KX_A_DEVICE_ID		0x157F
+#define I40E_KX_B_DEVICE_ID		0x1580
+#define I40E_KX_C_DEVICE_ID		0x1581
+#define I40E_KX_D_DEVICE_ID		0x1582
+#define I40E_QSFP_A_DEVICE_ID		0x1583
+#define I40E_QSFP_B_DEVICE_ID		0x1584
+#define I40E_QSFP_C_DEVICE_ID		0x1585
+#define I40E_VF_DEVICE_ID		0x154C
+#define I40E_VF_HV_DEVICE_ID		0x1571
+
+#define I40E_FW_API_VERSION_MAJOR  0x0001
+#define I40E_FW_API_VERSION_MINOR  0x0000
+
+#define I40E_MAX_VSI_QP			16
+#define I40E_MAX_VF_VSI			3
+#define I40E_MAX_CHAINED_RX_BUFFERS	5
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT		18000
+
+/* Check whether an address is multicast. This is a little-endian specific check. */
+#define I40E_IS_MULTICAST(address)	\
+	(bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address)	\
+	((((u8 *)(address))[0] == ((u8)0xff)) && \
+	(((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time)		(((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS	6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R)	\
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
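+
+/* Worked example (illustrative only): on a 512-entry ring with
+ * next_to_clean = 10 and next_to_use = 500, I40E_DESC_UNUSED yields
+ * (512 + 10) - 500 - 1 = 21 free descriptors; the "- 1" reserves one slot
+ * so that a completely full ring is distinguishable from an empty one.
+ */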
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE	0x0
+#define I40E_QTX_CTL_PF_QUEUE	0x2
+
+/* debug masks */
+enum i40e_debug_mask {
+	I40E_DEBUG_INIT			= 0x00000001,
+	I40E_DEBUG_RELEASE		= 0x00000002,
+
+	I40E_DEBUG_LINK			= 0x00000010,
+	I40E_DEBUG_PHY			= 0x00000020,
+	I40E_DEBUG_HMC			= 0x00000040,
+	I40E_DEBUG_NVM			= 0x00000080,
+	I40E_DEBUG_LAN			= 0x00000100,
+	I40E_DEBUG_FLOW			= 0x00000200,
+	I40E_DEBUG_DCB			= 0x00000400,
+	I40E_DEBUG_DIAG			= 0x00000800,
+
+	I40E_DEBUG_AQ_MESSAGE		= 0x01000000, /* for i40e_debug() */
+	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
+	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
+	I40E_DEBUG_AQ_COMMAND		= 0x06000000, /* for i40e_debug_aq() */
+	I40E_DEBUG_AQ			= 0x0F000000,
+
+	I40E_DEBUG_USER			= 0xF0000000,
+
+	I40E_DEBUG_ALL			= 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with.  This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed.  For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+	I40E_MAC_UNKNOWN = 0,
+	I40E_MAC_X710,
+	I40E_MAC_XL710,
+	I40E_MAC_VF,
+	I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+	I40E_MEDIA_TYPE_UNKNOWN = 0,
+	I40E_MEDIA_TYPE_FIBER,
+	I40E_MEDIA_TYPE_BASET,
+	I40E_MEDIA_TYPE_BACKPLANE,
+	I40E_MEDIA_TYPE_CX4,
+	I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+	I40E_FC_NONE = 0,
+	I40E_FC_RX_PAUSE,
+	I40E_FC_TX_PAUSE,
+	I40E_FC_FULL,
+	I40E_FC_PFC,
+	I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+	I40E_VSI_MAIN = 0,
+	I40E_VSI_VMDQ1,
+	I40E_VSI_VMDQ2,
+	I40E_VSI_CTRL,
+	I40E_VSI_FCOE,
+	I40E_VSI_MIRROR,
+	I40E_VSI_SRIOV,
+	I40E_VSI_FDIR,
+	I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+	I40E_QUEUE_TYPE_RX = 0,
+	I40E_QUEUE_TYPE_TX,
+	I40E_QUEUE_TYPE_PE_CEQ,
+	I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+	enum i40e_aq_phy_type phy_type;
+	enum i40e_aq_link_speed link_speed;
+	u8 link_info;
+	u8 an_info;
+	u8 ext_info;
+	/* whether Link Status Event notification to SW is enabled */
+	bool lse_enable;
+};
+
+struct i40e_phy_info {
+	struct i40e_link_status link_info;
+	struct i40e_link_status link_info_old;
+	u32 autoneg_advertised;
+	u32 phy_id;
+	u32 module_type;
+	bool get_link_info;
+	enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO			30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+	u32  switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB		0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD	0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD	0x3
+
+	u32  management_mode;
+	u32  npar_enable;
+	u32  os2bmc;
+	u32  valid_functions;
+	bool sr_iov_1_1;
+	bool vmdq;
+	bool evb_802_1_qbg; /* Edge Virtual Bridging */
+	bool evb_802_1_qbh; /* Bridge Port Extension */
+	bool dcb;
+	bool fcoe;
+	bool mfp_mode_1;
+	bool mgmt_cem;
+	bool ieee_1588;
+	bool iwarp;
+	bool fd;
+	u32 fd_filters_guaranteed;
+	u32 fd_filters_best_effort;
+	bool rss;
+	u32 rss_table_size;
+	u32 rss_table_entry_width;
+	bool led[I40E_HW_CAP_MAX_GPIO];
+	bool sdp[I40E_HW_CAP_MAX_GPIO];
+	u32 nvm_image_type;
+	u32 num_flow_director_filters;
+	u32 num_vfs;
+	u32 vf_base_id;
+	u32 num_vsis;
+	u32 num_rx_qp;
+	u32 num_tx_qp;
+	u32 base_queue;
+	u32 num_msix_vectors;
+	u32 num_msix_vectors_vf;
+	u32 led_pin_num;
+	u32 sdp_pin_num;
+	u32 mdio_port_num;
+	u32 mdio_port_mode;
+	u8 rx_buf_chain_len;
+	u32 enabled_tcmap;
+	u32 maxtc;
+};
+
+struct i40e_mac_info {
+	enum i40e_mac_type type;
+	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+	I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+	I40E_RESOURCE_READ = 1,
+	I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+	u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+	u64 hw_semaphore_wait;    /* - || - */
+	u32 timeout;              /* [ms] */
+	u16 sr_size;              /* Shadow RAM size in words */
+	bool blank_nvm_mode;      /* is NVM empty (no FW present) */
+	u16 version;              /* NVM package version */
+	u32 eetrack;              /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+	i40e_bus_type_unknown = 0,
+	i40e_bus_type_pci,
+	i40e_bus_type_pcix,
+	i40e_bus_type_pci_express,
+	i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+	i40e_bus_speed_unknown	= 0,
+	i40e_bus_speed_33	= 33,
+	i40e_bus_speed_66	= 66,
+	i40e_bus_speed_100	= 100,
+	i40e_bus_speed_120	= 120,
+	i40e_bus_speed_133	= 133,
+	i40e_bus_speed_2500	= 2500,
+	i40e_bus_speed_5000	= 5000,
+	i40e_bus_speed_8000	= 8000,
+	i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+	i40e_bus_width_unknown	= 0,
+	i40e_bus_width_pcie_x1	= 1,
+	i40e_bus_width_pcie_x2	= 2,
+	i40e_bus_width_pcie_x4	= 4,
+	i40e_bus_width_pcie_x8	= 8,
+	i40e_bus_width_32	= 32,
+	i40e_bus_width_64	= 64,
+	i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+	enum i40e_bus_speed speed;
+	enum i40e_bus_width width;
+	enum i40e_bus_type type;
+
+	u16 func;
+	u16 device;
+	u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+	enum i40e_fc_mode current_mode; /* FC mode in effect */
+	enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS		8
+#define I40E_MAX_USER_PRIORITY		8
+#define I40E_DCBX_MAX_APPS		32
+#define I40E_LLDPDU_SIZE		1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+	u8 willing;
+	u8 cbs;
+	u8 maxtcs;
+	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+	u8 willing;
+	u8 mbc;
+	u8 pfccap;
+	u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+	u8  priority;
+	u8  selector;
+	u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+	u32 numapps;
+	struct i40e_ieee_ets_config etscfg;
+	struct i40e_ieee_ets_recommend etsrec;
+	struct i40e_ieee_pfc_config pfc;
+	struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+
+	/* function pointer structs */
+	struct i40e_phy_info phy;
+	struct i40e_mac_info mac;
+	struct i40e_bus_info bus;
+	struct i40e_nvm_info nvm;
+	struct i40e_fc_info fc;
+
+	/* pci info */
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	u8 port;
+	bool adapter_stopped;
+
+	/* capabilities for entire device and PCI func */
+	struct i40e_hw_capabilities dev_caps;
+	struct i40e_hw_capabilities func_caps;
+
+	/* Flow Director shared filter space */
+	u16 fdir_shared_filter_count;
+
+	/* device profile info */
+	u8  pf_id;
+	u16 main_vsi_seid;
+
+	/* Closest numa node to the device */
+	u16 numa_node;
+
+	/* Admin Queue info */
+	struct i40e_adminq_info aq;
+
+	/* HMC info */
+	struct i40e_hmc_info hmc; /* HMC info struct */
+
+	/* LLDP/DCBX Status */
+	u16 dcbx_status;
+
+	/* DCBX info */
+	struct i40e_dcbx_config local_dcbx_config;
+	struct i40e_dcbx_config remote_dcbx_config;
+
+	/* debug mask */
+	u32 debug_mask;
+};
+
+struct i40e_driver_version {
+	u8 major_version;
+	u8 minor_version;
+	u8 build_version;
+	u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fd_id; /* Flow director filter id */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+			} hi_dword;
+		} qword0;
+		struct {
+			/* ext status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+	} wb;  /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+	struct {
+		__le64  pkt_addr; /* Packet buffer address */
+		__le64  hdr_addr; /* Header buffer address */
+			/* bit 0 of hdr_buffer_addr is DD bit */
+		__le64  rsvd1;
+		__le64  rsvd2;
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+			} hi_dword;
+		} qword0;
+		struct {
+			/* status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+		struct {
+			__le16 ext_status; /* extended status */
+			__le16 rsvd;
+			__le16 l2tag2_1;
+			__le16 l2tag2_2;
+		} qword2;
+		struct {
+			union {
+				__le32 flex_bytes_lo;
+				__le32 pe_status;
+			} lo_dword;
+			union {
+				__le32 flex_bytes_hi;
+				__le32 fd_id;
+			} hi_dword;
+		} qword3;
+	} wb;  /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT	0
+#define I40E_RXD_QW1_STATUS_MASK	(0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_STATUS_DD_SHIFT		= 0,
+	I40E_RX_DESC_STATUS_EOF_SHIFT		= 1,
+	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
+	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
+	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
+	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 3 BITS */
+	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
+	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
+	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
+	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
+	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x7UL << \
+					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
+	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
+	I40E_RX_DESC_FLTSTAT_RSV	= 2,
+	I40E_RX_DESC_FLTSTAT_RSS_HASH	= 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT	19
+#define I40E_RXD_QW1_ERROR_MASK		(0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_ERROR_RXE_SHIFT		= 0,
+	I40E_RX_DESC_ERROR_RECIPE_SHIFT		= 1,
+	I40E_RX_DESC_ERROR_HBO_SHIFT		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_SHIFT		= 3, /* 3 BITS */
+	I40E_RX_DESC_ERROR_IPE_SHIFT		= 3,
+	I40E_RX_DESC_ERROR_L4E_SHIFT		= 4,
+	I40E_RX_DESC_ERROR_EIPE_SHIFT		= 5,
+	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+	I40E_RX_DESC_ERROR_L3L4E_NONE		= 0,
+	I40E_RX_DESC_ERROR_L3L4E_PROT		= 1,
+	I40E_RX_DESC_ERROR_L3L4E_FC		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR	= 3,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN	= 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT	30
+#define I40E_RXD_QW1_PTYPE_MASK		(0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+	I40E_RX_PTYPE_L2_RESERVED		= 0,
+	I40E_RX_PTYPE_L2_MAC_PAY2		= 1,
+	I40E_RX_PTYPE_L2_TIMESYNC_PAY2		= 2,
+	I40E_RX_PTYPE_L2_FIP_PAY2		= 3,
+	I40E_RX_PTYPE_L2_OUI_PAY2		= 4,
+	I40E_RX_PTYPE_L2_MACCNTRL_PAY2		= 5,
+	I40E_RX_PTYPE_L2_LLDP_PAY2		= 6,
+	I40E_RX_PTYPE_L2_ECP_PAY2		= 7,
+	I40E_RX_PTYPE_L2_EVB_PAY2		= 8,
+	I40E_RX_PTYPE_L2_QCN_PAY2		= 9,
+	I40E_RX_PTYPE_L2_EAPOL_PAY2		= 10,
+	I40E_RX_PTYPE_L2_ARP			= 11,
+	I40E_RX_PTYPE_L2_FCOE_PAY3		= 12,
+	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3	= 13,
+	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3	= 14,
+	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3	= 15,
+	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA	= 16,
+	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3		= 17,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA	= 18,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY		= 19,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP		= 20,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER	= 21
+};
+
+struct i40e_rx_ptype_decoded {
+	u32 ptype:8;
+	u32 known:1;
+	u32 outer_ip:1;
+	u32 outer_ip_ver:1;
+	u32 outer_frag:1;
+	u32 tunnel_type:3;
+	u32 tunnel_end_prot:2;
+	u32 tunnel_end_frag:1;
+	u32 inner_prot:4;
+	u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+	I40E_RX_PTYPE_OUTER_L2	= 0,
+	I40E_RX_PTYPE_OUTER_IP	= 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+	I40E_RX_PTYPE_OUTER_NONE	= 0,
+	I40E_RX_PTYPE_OUTER_IPV4	= 0,
+	I40E_RX_PTYPE_OUTER_IPV6	= 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+	I40E_RX_PTYPE_NOT_FRAG	= 0,
+	I40E_RX_PTYPE_FRAG	= 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+	I40E_RX_PTYPE_TUNNEL_NONE		= 0,
+	I40E_RX_PTYPE_TUNNEL_IP_IP		= 1,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+	I40E_RX_PTYPE_TUNNEL_END_NONE	= 0,
+	I40E_RX_PTYPE_TUNNEL_END_IPV4	= 1,
+	I40E_RX_PTYPE_TUNNEL_END_IPV6	= 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+	I40E_RX_PTYPE_INNER_PROT_NONE		= 0,
+	I40E_RX_PTYPE_INNER_PROT_UDP		= 1,
+	I40E_RX_PTYPE_INNER_PROT_TCP		= 2,
+	I40E_RX_PTYPE_INNER_PROT_SCTP		= 3,
+	I40E_RX_PTYPE_INNER_PROT_ICMP		= 4,
+	I40E_RX_PTYPE_INNER_PROT_TIMESYNC	= 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+	I40E_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT	38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
+					 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT	52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK	(0x7FFULL << \
+					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
+					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
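+
+/* Decoding sketch (illustrative only, not a driver API): a clean-up loop
+ * can extract the packet buffer length and the DD (done) bit from the
+ * writeback qword1 with
+ *
+ *	u64 qw1 = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ *	u32 len = (qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ *		  I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ *	bool dd = qw1 & (1ULL << I40E_RX_DESC_STATUS_DD_SHIFT);
+ */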
+
+enum i40e_rx_desc_ext_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT	= 0,
+	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT	= 1,
+	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT	= 2, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT	= 4, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT	= 6, /* 3 BITS */
+	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT	= 9,
+	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT	= 10,
+	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT	= 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_PE_STATUS_QPID_SHIFT	= 0, /* 18 BITS */
+	I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT	= 0, /* 16 BITS */
+	I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT	= 16, /* 8 BITS */
+	I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT	= 24,
+	I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT	= 25,
+	I40E_RX_DESC_PE_STATUS_PORTV_SHIFT	= 26,
+	I40E_RX_DESC_PE_STATUS_URG_SHIFT	= 27,
+	I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT	= 28,
+	I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT	= 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT		38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH			0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT	2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK	(0x7UL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT	19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK		(0x3FUL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_DD_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT	= 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+	I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS	= 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT	= 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+	__le64 buffer_addr; /* Address of descriptor's data buf */
+	__le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_QW1_DTYPE_MASK		(0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+	I40E_TX_DESC_DTYPE_DATA		= 0x0,
+	I40E_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
+	I40E_TX_DESC_DTYPE_CONTEXT	= 0x1,
+	I40E_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	I40E_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
+	I40E_TX_DESC_DTYPE_DDP_CTX	= 0x9,
+	I40E_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_1	= 0xC,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_2	= 0xD,
+	I40E_TX_DESC_DTYPE_DESC_DONE	= 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT	4
+#define I40E_TXD_QW1_CMD_MASK	(0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+	I40E_TX_DESC_CMD_EOP			= 0x0001,
+	I40E_TX_DESC_CMD_RS			= 0x0002,
+	I40E_TX_DESC_CMD_ICRC			= 0x0004,
+	I40E_TX_DESC_CMD_IL2TAG1		= 0x0008,
+	I40E_TX_DESC_CMD_DUMMY			= 0x0010,
+	I40E_TX_DESC_CMD_IIPT_NONIP		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
+	I40E_TX_DESC_CMD_FCOET			= 0x0080,
+	I40E_TX_DESC_CMD_L4T_EOFT_UNK		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_N		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_T		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI	= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_A		= 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT	16
+#define I40E_TXD_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					 I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+	/* Note: These are predefined bit offsets */
+	I40E_TX_DESC_LENGTH_MACLEN_SHIFT	= 0, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_IPLEN_SHIFT		= 7, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT	= 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT	34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK	(0x3FFFULL << \
+					 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT	48
+#define I40E_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
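+
+/* Packing sketch (illustrative only): a data descriptor's second quadword
+ * is assembled from the fields above, roughly
+ *
+ *	cmd_type_offset_bsz = cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ *		((u64)td_cmd    << I40E_TXD_QW1_CMD_SHIFT) |
+ *		((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ *		((u64)size      << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ *		((u64)td_tag    << I40E_TXD_QW1_L2TAG1_SHIFT));
+ *
+ * which is effectively what the build_ctob() helper used by i40e_tx_map()
+ * computes.
+ */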
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+	__le32 tunneling_params;
+	__le16 l2tag2;
+	__le16 rsvd;
+	__le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT	4
+#define I40E_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+	I40E_TX_CTX_DESC_TSO		= 0x01,
+	I40E_TX_CTX_DESC_TSYN		= 0x02,
+	I40E_TX_CTX_DESC_IL2TAG2	= 0x04,
+	I40E_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
+	I40E_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
+	I40E_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
+	I40E_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
+	I40E_TX_CTX_DESC_SWTCH_VSI	= 0x30,
+	I40E_TX_CTX_DESC_SWPE		= 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT	30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK	(0x3FFFFULL << \
+					 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT	50
+#define I40E_TXD_CTX_QW1_MSS_MASK	(0x3FFFULL << \
+					 I40E_TXD_CTX_QW1_MSS_SHIFT)
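+
+/* Composition sketch (illustrative only): for TSO, the context descriptor's
+ * type_cmd_tso_mss quadword combines the fields above as
+ *
+ *	qw1 = I40E_TX_DESC_DTYPE_CONTEXT |
+ *	      ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ *	      ((u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ *	      ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *
+ * where tso_len is the payload length (skb->len - hdr_len) and mss is the
+ * stack's gso_size.
+ */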
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT	50
+#define I40E_TXD_CTX_QW1_VSI_MASK	(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT	0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK	(0x3ULL << \
+					 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+	I40E_TX_CTX_EXT_IP_NONE		= 0x0,
+	I40E_TX_CTX_EXT_IP_IPV6		= 0x1,
+	I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
+	I40E_TX_CTX_EXT_IP_IPV4		= 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT	2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK	(0x3FULL << \
+					 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT	9
+#define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
+					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT	19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
+					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+	__le32 qindex_flex_ptype_vsi;
+	__le32 rsvd;
+	__le32 dtype_cmd_cntindex;
+	__le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT	0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK	(0x7FFUL << \
+					 I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT	11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK	(0x7UL << \
+					 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT	17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK	(0x3FUL << \
+					 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+	/* Note: Value 0-25 are reserved for future use */
+	I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP		= 26,
+	I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP		= 27,
+	I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP		= 28,
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
+	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN		= 32,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
+	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
+	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
+	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
+	/* Note: Value 37 is reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP		= 38,
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
+	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN		= 42,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
+	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
+	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
+	I40E_FILTER_PCTYPE_FRAG_IPV6			= 46,
+	/* Note: Value 47 is reserved for future use */
+	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
+	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
+	/* Note: Value 50-62 are reserved for future use */
+	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
+};
+
+enum i40e_filter_program_desc_dest {
+	I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET		= 0x0,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER	= 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE			= 0x0,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID		= 0x1,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES	= 0x2,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES		= 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
+#define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT	(0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK	(0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+	I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE		= 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT	(0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
+					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
+						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+	I40E_FLOW_DIRECTOR_FLTR = 0,
+	I40E_PE_QUAD_HASH_FLTR = 1,
+	I40E_ETHERTYPE_FLTR,
+	I40E_FCOE_CTX_FLTR,
+	I40E_MAC_VLAN_FLTR,
+	I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 vsi_number;
+	u16 vsis_allocated;
+	u16 vsis_unallocated;
+	u16 flags;
+	u8 pf_num;
+	u8 vf_num;
+	u8 connection_type;
+	struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+	u64 rx_bytes;			/* gorc */
+	u64 rx_unicast;			/* uprc */
+	u64 rx_multicast;		/* mprc */
+	u64 rx_broadcast;		/* bprc */
+	u64 rx_discards;		/* rdpc */
+	u64 rx_errors;			/* repc */
+	u64 rx_missed;			/* rmpc */
+	u64 rx_unknown_protocol;	/* rupp */
+	u64 tx_bytes;			/* gotc */
+	u64 tx_unicast;			/* uptc */
+	u64 tx_multicast;		/* mptc */
+	u64 tx_broadcast;		/* bptc */
+	u64 tx_discards;		/* tdpc */
+	u64 tx_errors;			/* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+	/* eth stats collected by the port */
+	struct i40e_eth_stats eth;
+
+	/* additional port specific stats */
+	u64 tx_dropped_link_down;	/* tdold */
+	u64 crc_errors;			/* crcerrs */
+	u64 illegal_bytes;		/* illerrc */
+	u64 error_bytes;		/* errbc */
+	u64 mac_local_faults;		/* mlfc */
+	u64 mac_remote_faults;		/* mrfc */
+	u64 rx_length_errors;		/* rlec */
+	u64 link_xon_rx;		/* lxonrxc */
+	u64 link_xoff_rx;		/* lxoffrxc */
+	u64 priority_xon_rx[8];		/* pxonrxc[8] */
+	u64 priority_xoff_rx[8];	/* pxoffrxc[8] */
+	u64 link_xon_tx;		/* lxontxc */
+	u64 link_xoff_tx;		/* lxofftxc */
+	u64 priority_xon_tx[8];		/* pxontxc[8] */
+	u64 priority_xoff_tx[8];	/* pxofftxc[8] */
+	u64 priority_xon_2_xoff[8];	/* pxon2offc[8] */
+	u64 rx_size_64;			/* prc64 */
+	u64 rx_size_127;		/* prc127 */
+	u64 rx_size_255;		/* prc255 */
+	u64 rx_size_511;		/* prc511 */
+	u64 rx_size_1023;		/* prc1023 */
+	u64 rx_size_1522;		/* prc1522 */
+	u64 rx_size_big;		/* prc9522 */
+	u64 rx_undersize;		/* ruc */
+	u64 rx_fragments;		/* rfc */
+	u64 rx_oversize;		/* roc */
+	u64 rx_jabber;			/* rjc */
+	u64 tx_size_64;			/* ptc64 */
+	u64 tx_size_127;		/* ptc127 */
+	u64 tx_size_255;		/* ptc255 */
+	u64 tx_size_511;		/* ptc511 */
+	u64 tx_size_1023;		/* ptc1023 */
+	u64 tx_size_1522;		/* ptc1522 */
+	u64 tx_size_big;		/* ptc9522 */
+	u64 mac_short_packet_dropped;	/* mspdc */
+	u64 checksum_error;		/* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD		0x00
+#define I40E_SR_EMP_MODULE_PTR			0x0F
+#define I40E_SR_NVM_IMAGE_VERSION		0x18
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
+#define I40E_SR_NVM_EETRACK_LO			0x2D
+#define I40E_SR_NVM_EETRACK_HI			0x2E
+#define I40E_SR_VPD_PTR				0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR		0x3E
+#define I40E_SR_SW_CHECKSUM_WORD		0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE		1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE	1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT		0x06
+#define I40E_SR_CONTROL_WORD_1_MASK	(0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS	0x800
+#define I40E_SR_WORDS_IN_1KB		512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE	0xBABA
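+
+/* Verification sketch (illustrative only): summing every shadow RAM word,
+ * the checksum word included, must yield I40E_SR_SW_CHECKSUM_BASE:
+ *
+ *	u16 sum = 0;
+ *	for (i = 0; i < hw->nvm.sr_size; i++)
+ *		sum += sr_word[i];
+ *	valid = (sum == I40E_SR_SW_CHECKSUM_BASE);
+ *
+ * (a real implementation would also skip the VPD and PCIe-ALT modules
+ * whose pointers and maximum sizes are defined above).
+ */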
+
+#define I40E_SRRD_SRCTL_ATTEMPTS	100000
+
+enum i40e_switch_element_types {
+	I40E_SWITCH_ELEMENT_TYPE_MAC	= 1,
+	I40E_SWITCH_ELEMENT_TYPE_PF	= 2,
+	I40E_SWITCH_ELEMENT_TYPE_VF	= 3,
+	I40E_SWITCH_ELEMENT_TYPE_EMP	= 4,
+	I40E_SWITCH_ELEMENT_TYPE_BMC	= 6,
+	I40E_SWITCH_ELEMENT_TYPE_PE	= 16,
+	I40E_SWITCH_ELEMENT_TYPE_VEB	= 17,
+	I40E_SWITCH_ELEMENT_TYPE_PA	= 18,
+	I40E_SWITCH_ELEMENT_TYPE_VSI	= 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+	I40E_ETHER_TYPE_1588		= 0,
+	I40E_ETHER_TYPE_FIP		= 1,
+	I40E_ETHER_TYPE_OUI_EXTENDED	= 2,
+	I40E_ETHER_TYPE_MAC_CONTROL	= 3,
+	I40E_ETHER_TYPE_LLDP		= 4,
+	I40E_ETHER_TYPE_EVB_PROTOCOL1	= 5,
+	I40E_ETHER_TYPE_EVB_PROTOCOL2	= 6,
+	I40E_ETHER_TYPE_QCN_CNM		= 7,
+	I40E_ETHER_TYPE_8021X		= 8,
+	I40E_ETHER_TYPE_ARP		= 9,
+	I40E_ETHER_TYPE_RSV1		= 10,
+	I40E_ETHER_TYPE_RSV2		= 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE	1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+	I40E_HASH_FILTER_SIZE_1K	= 0,
+	I40E_HASH_FILTER_SIZE_2K	= 1,
+	I40E_HASH_FILTER_SIZE_4K	= 2,
+	I40E_HASH_FILTER_SIZE_8K	= 3,
+	I40E_HASH_FILTER_SIZE_16K	= 4,
+	I40E_HASH_FILTER_SIZE_32K	= 5,
+	I40E_HASH_FILTER_SIZE_64K	= 6,
+	I40E_HASH_FILTER_SIZE_128K	= 7,
+	I40E_HASH_FILTER_SIZE_256K	= 8,
+	I40E_HASH_FILTER_SIZE_512K	= 9,
+	I40E_HASH_FILTER_SIZE_1M	= 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE		512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+	I40E_DMA_CNTX_SIZE_512		= 0,
+	I40E_DMA_CNTX_SIZE_1K		= 1,
+	I40E_DMA_CNTX_SIZE_2K		= 2,
+	I40E_DMA_CNTX_SIZE_4K		= 3,
+	I40E_DMA_CNTX_SIZE_8K		= 4,
+	I40E_DMA_CNTX_SIZE_16K		= 5,
+	I40E_DMA_CNTX_SIZE_32K		= 6,
+	I40E_DMA_CNTX_SIZE_64K		= 7,
+	I40E_DMA_CNTX_SIZE_128K		= 8,
+	I40E_DMA_CNTX_SIZE_256K		= 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+	I40E_HASH_LUT_SIZE_128		= 0,
+	I40E_HASH_LUT_SIZE_512		= 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+	/* number of PE Quad Hash filter buckets */
+	enum i40e_hash_filter_size pe_filt_num;
+	/* number of PE Quad Hash contexts */
+	enum i40e_dma_cntx_size pe_cntx_num;
+	/* number of FCoE filter buckets */
+	enum i40e_hash_filter_size fcoe_filt_num;
+	/* number of FCoE DDP contexts */
+	enum i40e_dma_cntx_size fcoe_cntx_num;
+	/* size of the Hash LUT */
+	enum i40e_hash_lut_size	hash_lut_size;
+	/* enable FDIR filters for PF and its VFs */
+	bool enable_fdir;
+	/* enable Ethertype filters for PF and its VFs */
+	bool enable_ethtype;
+	/* enable MAC/VLAN filters for PF and its VFs */
+	bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+	u16 mac_etype_used;   /* Used perfect match MAC/EtherType filters */
+	u16 etype_used;       /* Used perfect EtherType filters */
+	u16 mac_etype_free;   /* Un-used perfect match MAC/EtherType filters */
+	u16 etype_free;       /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+	I40E_RESET_POR		= 0,
+	I40E_RESET_CORER	= 1,
+	I40E_RESET_GLOBR	= 2,
+	I40E_RESET_EMPR		= 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR		0xF
+struct i40e_lldp_variables {
+	u16 length;
+	u16 adminstatus;
+	u16 msgfasttx;
+	u16 msgtxinterval;
+	u16 txparams;
+	u16 timers;
+	u16 crc8;
+};
+
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 0000000..cc6654f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
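+
+/* Initialization sketch (illustrative only), following the order described
+ * above:
+ *
+ *	I40E_VIRTCHNL_OP_VERSION
+ *	I40E_VIRTCHNL_OP_RESET_VF
+ *	I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ *	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ *	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ *	I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ *
+ * then optionally ADD_ETHER_ADDRESS / ADD_VLAN before passing traffic.
+ */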
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF using the following ops */
+	I40E_VIRTCHNL_OP_UNKNOWN = 0,
+	I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	I40E_VIRTCHNL_OP_RESET_VF,
+	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+	I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+	I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+	I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+	I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+	I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+	I40E_VIRTCHNL_OP_ADD_VLAN,
+	I40E_VIRTCHNL_OP_DEL_VLAN,
+	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+	I40E_VIRTCHNL_OP_GET_STATS,
+	I40E_VIRTCHNL_OP_FCOE,
+/* PF sends status change events to VFs using the following op */
+	I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
+	enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+	i40e_status v_retval;  /* ditto for desc->retval */
+	u32 vfid;			 /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures.*/
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * The PF's reply also carries its major/minor versions in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR		0
+struct i40e_virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
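+
+/* A minimal sketch of the version check described above, assuming the
+ * PF's reply has already been copied into pf_ver (names illustrative):
+ *
+ *	if (pf_ver.major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *		return -EIO;		// cannot operate at all
+ *	if (pf_ver.minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *		dev_warn(dev, "PF/VF minor version mismatch\n");
+ */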
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	enum i40e_vsi_type vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+
+struct i40e_virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_offload_flags;
+	u32 max_fcoe_contexts;
+	u32 max_fcoe_filters;
+
+	struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
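+
+/* vsi_res[1] is a variable-length trailing array: the PF's reply carries
+ * num_vsis entries back to back, sized as the PF code below sizes it.
+ * A sketch of the receiving side (setup_vsi() is hypothetical):
+ *
+ *	len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *	      num_vsis * sizeof(struct i40e_virtchnl_vsi_resource);
+ *	for (i = 0; i < vfres->num_vsis; i++)
+ *		setup_vsi(&vfres->vsi_res[i]);
+ */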
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled;
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled;
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u64 dma_ring_addr;
+	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct i40e_virtchnl_txq_info txq;
+	struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	struct i40e_virtchnl_queue_pair_info qpair[1];
+};
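+
+/* qpair[1] is likewise a variable-length trailing array. A sketch of how
+ * a VF might size the message for n queue pairs, matching the length
+ * check the PF applies on receipt:
+ *
+ *	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ *	      n * sizeof(struct i40e_virtchnl_queue_pair_info);
+ *	qci = kzalloc(len, GFP_KERNEL);
+ *	qci->vsi_id = vsi_id;
+ *	qci->num_queue_pairs = n;
+ */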
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct i40e_virtchnl_vector_map vecmap[1];
+};
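+
+/* Sketch of a vector map that routes queue pairs 0 and 1 to vector 1,
+ * leaving the "other" causes on vector 0 as described above (values
+ * illustrative):
+ *
+ *	vecmap->vsi_id = vsi_id;
+ *	vecmap->vector_id = 1;
+ *	vecmap->rxq_map = BIT(0) | BIT(1);
+ *	vecmap->txq_map = BIT(0) | BIT(1);
+ */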
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
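+
+/* Sketch of selecting the first four queue pairs of a VSI for
+ * I40E_VIRTCHNL_OP_ENABLE_QUEUES (values illustrative):
+ *
+ *	vqs.vsi_id = vsi_id;
+ *	vqs.rx_queues = 0xf;
+ *	vqs.tx_queues = 0xf;
+ */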
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC	0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO		0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct i40e_virtchnl_pf_event {
+	enum i40e_virtchnl_event_codes event;
+	union {
+		struct {
+			enum i40e_aq_link_speed link_speed;
+			bool link_status;
+		} link_event;
+	} event_data;
+
+	int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+	I40E_VFR_INPROGRESS = 0,
+	I40E_VFR_COMPLETED,
+	I40E_VFR_VFACTIVE,
+	I40E_VFR_UNKNOWN,
+};
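+
+/* Sketch of the VF-side poll described above, assuming the state is
+ * masked down to two bits so that a mid-reset DEADBEEF read decodes as
+ * I40E_VFR_UNKNOWN (reinit_adminq() is hypothetical):
+ *
+ *	reg = rd32(hw, I40E_VFGEN_RSTAT) & 0x3;
+ *	if (reg == I40E_VFR_COMPLETED || reg == I40E_VFR_VFACTIVE)
+ *		reinit_adminq();
+ */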
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 0000000..8967e58
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2335 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vf relative vsi id
+ *
+ * check for the valid vsi id
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	/* vsi_id comes from the (untrusted) VF, so range check it before
+	 * using it to index the PF's VSI table
+	 */
+	return vsi_id < pf->hw.func_caps.num_vsis && pf->vsi[vsi_id] &&
+	       pf->vsi[vsi_id]->vf_id == vf->vf_id;
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check for the valid queue id
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+					    u8 qid)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	return qid < pf->vsi[vsi_id]->num_queue_pairs;
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the vf info
+ * @vector_id: vf relative vector id
+ *
+ * check for the valid vector id
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return pf relative queue id
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+				   u8 vsi_queue_id)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
+	if (le16_to_cpu(vsi->info.mapping_flags) &
+	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+		pf_queue_id =
+			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
+	else
+		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
+			      vsi_queue_id;
+
+	return pf_queue_id;
+}
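+
+/* Worked example: with contiguous mapping and queue_mapping[0] == 64,
+ * VF-relative queue 3 resolves to PF queue 64 + 3 = 67; with
+ * noncontiguous mapping the result is read directly from
+ * queue_mapping[3]. (Values are illustrative.)
+ */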
+
+/**
+ * i40e_ctrl_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+				  u16 vsi_queue_id,
+				  enum i40e_queue_ctrl ctrl)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	bool writeback = false;
+	u16 pf_queue_id;
+	int ret = 0;
+	u32 reg;
+
+	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
+
+	switch (ctrl) {
+	case I40E_QUEUE_CTRL_ENABLE:
+		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_ENABLECHECK:
+		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+		break;
+	case I40E_QUEUE_CTRL_DISABLE:
+		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_DISABLECHECK:
+		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLE:
+		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+		if (!ret) {
+			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
+			writeback = true;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (writeback) {
+		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
+		i40e_flush(hw);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_ctrl_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+				  u16 vsi_queue_id,
+				  enum i40e_queue_ctrl ctrl)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	bool writeback = false;
+	u16 pf_queue_id;
+	int ret = 0;
+	u32 reg;
+
+	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
+
+	switch (ctrl) {
+	case I40E_QUEUE_CTRL_ENABLE:
+		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_ENABLECHECK:
+		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+		break;
+	case I40E_QUEUE_CTRL_DISABLE:
+		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_DISABLECHECK:
+		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLE:
+		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
+		writeback = true;
+		break;
+	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+		if (!ret) {
+			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
+			writeback = true;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (writeback) {
+		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
+		i40e_flush(hw);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+				      struct i40e_virtchnl_vector_map *vecmap)
+{
+	unsigned long linklistmap = 0, tempmap;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u16 vsi_queue_id, pf_queue_id;
+	enum i40e_queue_type qtype;
+	u16 next_q, vector_id;
+	u32 reg, reg_idx;
+	u16 itr_idx = 0;
+
+	vector_id = vecmap->vector_id;
+	/* setup the head */
+	if (vector_id == 0)
+		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+	else
+		reg_idx = I40E_VPINT_LNKLSTN(
+			    ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+					      * vf->vf_id) + (vector_id - 1));
+
+	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+		/* Special case - No queues mapped on this vector */
+		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+		goto irq_list_done;
+	}
+	tempmap = vecmap->rxq_map;
+	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (vsi_queue_id < I40E_MAX_VSI_QP) {
+		linklistmap |= (1 <<
+				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				 vsi_queue_id));
+		vsi_queue_id =
+		    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
+	}
+
+	tempmap = vecmap->txq_map;
+	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (vsi_queue_id < I40E_MAX_VSI_QP) {
+		linklistmap |= (1 <<
+				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+				 + 1));
+		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					     vsi_queue_id + 1);
+	}
+
+	next_q = find_first_bit(&linklistmap,
+				(I40E_MAX_VSI_QP *
+				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
+	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+	wr32(hw, reg_idx, reg);
+
+	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+		switch (qtype) {
+		case I40E_QUEUE_TYPE_RX:
+			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+			itr_idx = vecmap->rxitr_idx;
+			break;
+		case I40E_QUEUE_TYPE_TX:
+			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+			itr_idx = vecmap->txitr_idx;
+			break;
+		default:
+			break;
+		}
+
+		next_q = find_next_bit(&linklistmap,
+				       (I40E_MAX_VSI_QP *
+					I40E_VIRTCHNL_SUPPORTED_QTYPES),
+				       next_q + 1);
+		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+							      vsi_queue_id);
+		} else {
+			pf_queue_id = I40E_QUEUE_END_OF_LIST;
+			qtype = 0;
+		}
+
+		/* format for the RQCTL & TQCTL regs is same */
+		reg = (vector_id) |
+		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+		wr32(hw, reg_idx, reg);
+	}
+
+irq_list_done:
+	i40e_flush(hw);
+}
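+
+/* Worked example for the linklistmap encoding above: each VSI queue pair
+ * contributes I40E_VIRTCHNL_SUPPORTED_QTYPES (here two) bits, rx at bit
+ * (2 * qid) and tx at bit (2 * qid + 1). So rxq_map = 0x3 (queues 0 and 1)
+ * with txq_map = 0x2 (queue 1) gives linklistmap = 0b1101, and the queues
+ * are chained rx0 -> rx1 -> tx1, terminated by I40E_QUEUE_END_OF_LIST.
+ */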
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+				    u16 vsi_queue_id,
+				    struct i40e_virtchnl_txq_info *info)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_hmc_obj_txq tx_ctx;
+	u16 pf_queue_id;
+	u32 qtx_ctl;
+	int ret = 0;
+
+	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+	/* clear the context structure first */
+	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+	/* only set the required fields */
+	tx_ctx.base = info->dma_ring_addr / 128;
+	tx_ctx.qlen = info->ring_len;
+	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+	tx_ctx.rdylist_act = 0;
+
+	/* clear the context in the HMC */
+	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
+			pf_queue_id, ret);
+		ret = -ENOENT;
+		goto error_context;
+	}
+
+	/* set the context in the HMC */
+	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"Failed to set VF LAN Tx queue context %d error: %d\n",
+			pf_queue_id, ret);
+		ret = -ENOENT;
+		goto error_context;
+	}
+
+	/* associate this queue with the PCI VF function */
+	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+		    & I40E_QTX_CTL_PF_INDX_MASK);
+	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+		    & I40E_QTX_CTL_VFVM_INDX_MASK);
+	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+	i40e_flush(hw);
+
+error_context:
+	return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+				    u16 vsi_queue_id,
+				    struct i40e_virtchnl_rxq_info *info)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_hmc_obj_rxq rx_ctx;
+	u16 pf_queue_id;
+	int ret = 0;
+
+	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+	/* clear the context structure first */
+	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+	/* only set the required fields */
+	rx_ctx.base = info->dma_ring_addr / 128;
+	rx_ctx.qlen = info->ring_len;
+
+	if (info->splithdr_enabled) {
+		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
+				  I40E_RX_SPLIT_IP      |
+				  I40E_RX_SPLIT_TCP_UDP |
+				  I40E_RX_SPLIT_SCTP;
+		/* header length validation */
+		if (info->hdr_size > ((2 * 1024) - 64)) {
+			ret = -EINVAL;
+			goto error_param;
+		}
+		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+		/* set splitalways mode 10b */
+		rx_ctx.dtype = 0x2;
+	}
+
+	/* databuffer length validation */
+	if (info->databuffer_size > ((16 * 1024) - 128)) {
+		ret = -EINVAL;
+		goto error_param;
+	}
+	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+	/* max pkt. length validation */
+	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+		ret = -EINVAL;
+		goto error_param;
+	}
+	rx_ctx.rxmax = info->max_pkt_size;
+
+	/* enable 32bytes desc always */
+	rx_ctx.dsize = 1;
+
+	/* default values */
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	rx_ctx.lrxqthresh = 2;
+	rx_ctx.crcstrip = 1;
+
+	/* clear the context in the HMC */
+	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
+			pf_queue_id, ret);
+		ret = -ENOENT;
+		goto error_param;
+	}
+
+	/* set the context in the HMC */
+	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"Failed to set VF LAN Rx queue context %d error: %d\n",
+			pf_queue_id, ret);
+		ret = -ENOENT;
+		goto error_param;
+	}
+
+error_param:
+	return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the vf info
+ * @type: type of VSI to allocate
+ *
+ * alloc vf vsi context & resources
+ **/
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+{
+	struct i40e_mac_filter *f = NULL;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vsi *vsi;
+	int ret = 0;
+
+	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+
+	if (!vsi) {
+		dev_err(&pf->pdev->dev,
+			"add vsi failed for vf %d, aq_err %d\n",
+			vf->vf_id, pf->hw.aq.asq_last_status);
+		ret = -ENOENT;
+		goto error_alloc_vsi_res;
+	}
+	if (type == I40E_VSI_SRIOV) {
+		vf->lan_vsi_index = vsi->idx;
+		vf->lan_vsi_id = vsi->id;
+		dev_info(&pf->pdev->dev,
+			 "LAN VSI index %d, VSI id %d\n",
+			 vsi->idx, vsi->id);
+		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+				    0, true, false);
+		/* only fail on a missing filter when we actually tried to
+		 * add one; other VSI types never set f
+		 */
+		if (!f) {
+			dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
+			ret = -ENOMEM;
+			goto error_alloc_vsi_res;
+		}
+	}
+
+	/* program mac filter */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+		goto error_alloc_vsi_res;
+	}
+
+	/* accept bcast pkts. by default */
+	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
+			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
+		ret = -EINVAL;
+	}
+
+error_alloc_vsi_res:
+	return ret;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+int i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+	int ret = -ENOENT;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg, reg_idx, msix_vf;
+	bool rsd = false;
+	u16 pf_queue_id;
+	int i, j;
+
+	/* warn the VF */
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
+
+	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+	/* The PF triggers a VFR only when the VF requests one; in the
+	 * case of VFLR, the HW triggers the VFR itself
+	 */
+	if (!flr) {
+		/* reset vf using VPGEN_VFRTRIG reg */
+		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+		i40e_flush(hw);
+	}
+
+	/* poll VPGEN_VFRSTAT reg to make sure
+	 * that reset is complete
+	 */
+	for (i = 0; i < 4; i++) {
+		/* vf reset requires driver to first reset the
+		 * vf & then poll the status register to make sure
+		 * that the requested op was completed
+		 * successfully
+		 */
+		udelay(10);
+		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+			rsd = true;
+			break;
+		}
+	}
+
+	if (!rsd)
+		dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
+			vf->vf_id);
+
+	/* fast disable qps */
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLE);
+		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLE);
+	}
+
+	/* Queue enable/disable requires driver to
+	 * first reset the vf & then poll the status register
+	 * to make sure that the requested op was completed
+	 * successfully
+	 */
+	udelay(10);
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
+		if (ret)
+			dev_info(&pf->pdev->dev,
+				 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
+				 j, vf->lan_vsi_index, vf->vf_id);
+		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
+		if (ret)
+			dev_info(&pf->pdev->dev,
+				 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
+				 j, vf->lan_vsi_index, vf->vf_id);
+	}
+
+	/* clear the irq settings */
+	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+	for (i = 0; i < msix_vf; i++) {
+		/* format is same for both registers */
+		if (i == 0)
+			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+		else
+			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+		wr32(hw, reg_idx, reg);
+		i40e_flush(hw);
+	}
+	/* disable interrupts so the VF starts in a known state */
+	for (i = 0; i < msix_vf; i++) {
+		/* format is same for both registers */
+		if (i == 0)
+			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+		else
+			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+		i40e_flush(hw);
+	}
+
+	/* set the defaults for the rqctl & tqctl registers */
+	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
+	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
+		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
+	}
+
+	/* clear the reset bit in the VPGEN_VFRTRIG reg */
+	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+	/* tell the VF the reset is done */
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+	i40e_flush(hw);
+
+	return ret;
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * enable vf mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg, total_queue_pairs = 0;
+	int j;
+
+	/* Tell the hardware we're using noncontiguous mapping. HW requires
+	 * that VF queues be mapped using this method, even when they are
+	 * contiguous in real life
+	 */
+	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+	/* enable VF vplan_qtable mappings */
+	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+	/* map PF queues to VF queues */
+	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
+		total_queue_pairs++;
+	}
+
+	/* map PF queues to VSI */
+	for (j = 0; j < 7; j++) {
+		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
+			reg = 0x07FF07FF;	/* unused */
+		} else {
+			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+							  j * 2);
+			reg = qid;
+			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+						      (j * 2) + 1);
+			reg |= qid << 16;
+		}
+		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+	}
+
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * disable vf mappings
+ **/
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	/* disable qp mappings */
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+	for (i = 0; i < I40E_MAX_VSI_QP; i++)
+		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+		     I40E_QUEUE_END_OF_LIST);
+	i40e_flush(hw);
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the vf info
+ *
+ * free vf resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+
+	/* free vsi & disconnect it from the parent uplink */
+	if (vf->lan_vsi_index) {
+		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
+		vf->lan_vsi_index = 0;
+		vf->lan_vsi_id = 0;
+	}
+	/* reset some of the state variables keeping
+	 * track of the resources
+	 */
+	vf->num_queue_pairs = 0;
+	vf->vf_states = 0;
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the vf info
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	int total_queue_pairs = 0;
+	int ret;
+
+	/* allocate hw vsi context & associated resources */
+	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+	if (ret)
+		goto error_alloc;
+	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+	/* store the total qps number for the runtime
+	 * vf req validation
+	 */
+	vf->num_queue_pairs = total_queue_pairs;
+
+	/* vf is now completely initialized */
+	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
+error_alloc:
+	if (ret)
+		i40e_free_vf_res(vf);
+
+	return ret;
+}
+
+/**
+ * i40e_vfs_are_assigned
+ * @pf: pointer to the pf structure
+ *
+ * Determine if any VFs are assigned to VMs
+ **/
+static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
+{
+	struct pci_dev *pdev = pf->pdev;
+	struct pci_dev *vfdev;
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
+	while (vfdev) {
+		/* if we don't own it we don't care */
+		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
+			/* if it is assigned we cannot release it */
+			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+				return true;
+		}
+
+		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				       I40E_VF_DEVICE_ID,
+				       vfdev);
+	}
+
+	return false;
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the pf structure
+ *
+ * free vf resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	if (!pf->vf)
+		return;
+
+	/* Disable interrupt 0 so we don't try to handle the VFLR. */
+	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+	i40e_flush(hw);
+
+	/* free up vf resources */
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+			i40e_free_vf_res(&pf->vf[i]);
+		/* disable qp mappings */
+		i40e_disable_vf_mappings(&pf->vf[i]);
+	}
+
+	kfree(pf->vf);
+	pf->vf = NULL;
+	pf->num_alloc_vfs = 0;
+
+	if (!i40e_vfs_are_assigned(pf))
+		pci_disable_sriov(pf->pdev);
+	else
+		dev_warn(&pf->pdev->dev,
+			 "unable to disable SR-IOV because VFs are assigned.\n");
+
+	/* Re-enable interrupt 0. */
+	wr32(hw, I40E_PFINT_DYN_CTL0,
+	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
+	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
+	i40e_flush(hw);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the pf structure
+ * @num_alloc_vfs: number of vfs to allocate
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+	struct i40e_vf *vfs;
+	int i, ret = 0;
+
+	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"pci_enable_sriov failed with error %d!\n", ret);
+		pf->num_alloc_vfs = 0;
+		goto err_iov;
+	}
+
+	/* allocate memory */
+	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
+	if (!vfs) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	/* apply default profile */
+	for (i = 0; i < num_alloc_vfs; i++) {
+		vfs[i].pf = pf;
+		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+		vfs[i].vf_id = i;
+
+		/* assign default capabilities */
+		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+
+		ret = i40e_alloc_vf_res(&vfs[i]);
+		if (ret)
+			break;
+		i40e_reset_vf(&vfs[i], true);
+
+		/* enable vf vplan_qtable mappings */
+		i40e_enable_vf_mappings(&vfs[i]);
+	}
+	pf->vf = vfs;
+	pf->num_alloc_vfs = num_alloc_vfs;
+
+err_alloc:
+	if (ret)
+		i40e_free_vfs(pf);
+err_iov:
+	return ret;
+}
+
+#endif
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int pre_existing_vfs = pci_num_vf(pdev);
+	int err = 0;
+
+	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+		i40e_free_vfs(pf);
+	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+		goto out;
+
+	if (num_vfs > pf->num_req_vfs) {
+		err = -EPERM;
+		goto err_out;
+	}
+
+	err = i40e_alloc_vfs(pf, num_vfs);
+	if (err) {
+		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+		goto err_out;
+	}
+
+out:
+	return num_vfs;
+
+err_out:
+	return err;
+#endif
+	return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	if (num_vfs)
+		return i40e_pci_sriov_enable(pdev, num_vfs);
+
+	i40e_free_vfs(pf);
+	return 0;
+}
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to vf
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+				  u32 v_retval, u8 *msg, u16 msglen)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status aq_ret;
+
+	/* single place to detect unsuccessful return values */
+	if (v_retval) {
+		vf->num_invalid_msgs++;
+		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
+			v_opcode, v_retval);
+		if (vf->num_invalid_msgs >
+		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+			dev_err(&pf->pdev->dev,
+				"Number of invalid messages exceeded for VF %d\n",
+				vf->vf_id);
+			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+		}
+	} else {
+		vf->num_valid_msgs++;
+	}
+
+	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+				     msg, msglen, NULL);
+	if (aq_ret) {
+		dev_err(&pf->pdev->dev,
+			"Unable to send the message to VF %d aq_err %d\n",
+			vf->vf_id, pf->hw.aq.asq_last_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the vf info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to vf
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+				   enum i40e_virtchnl_ops opcode,
+				   i40e_status retval)
+{
+	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_version_info info = {
+		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+	};
+
+	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+				      I40E_SUCCESS, (u8 *)&info,
+				      sizeof(struct i40e_virtchnl_version_info));
+}
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_vf_resource *vfres = NULL;
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+	int i = 0, len = 0;
+	int num_vsis = 1;
+	int ret;
+
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	len = (sizeof(struct i40e_virtchnl_vf_resource) +
+	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+
+	vfres = kzalloc(len, GFP_KERNEL);
+	if (!vfres) {
+		aq_ret = I40E_ERR_NO_MEMORY;
+		len = 0;
+		goto err;
+	}
+
+	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!vsi->info.pvid)
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+	vfres->num_vsis = num_vsis;
+	vfres->num_queue_pairs = vf->num_queue_pairs;
+	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+	if (vf->lan_vsi_index) {
+		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
+		vfres->vsi_res[i].num_queue_pairs =
+		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+		memcpy(vfres->vsi_res[i].default_mac_addr,
+		       vf->default_lan_addr.addr, ETH_ALEN);
+		i++;
+	}
+	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+err:
+	/* send the response back to the vf */
+	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+				     aq_ret, (u8 *)vfres, len);
+
+	kfree(vfres);
+	return ret;
+}
+
+/**
+ * i40e_vc_reset_vf_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to reset itself. Unlike other virtchnl
+ * messages, the pf driver doesn't send a response back to the vf.
+ **/
+static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+{
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+		return -ENOENT;
+
+	return i40e_reset_vf(vf, false);
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the promiscuous mode of
+ * vf vsis
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
+					       u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_promisc_info *info =
+	    (struct i40e_virtchnl_promisc_info *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	bool allmulti = false;
+	bool promisc = false;
+	i40e_status aq_ret;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+		promisc = true;
+	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
+						     promisc, NULL);
+	if (aq_ret)
+		goto error_param;
+
+	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+		allmulti = true;
+	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
+						       allmulti, NULL);
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf,
+				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_vsi_queue_config_info *qci =
+	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+	struct i40e_virtchnl_queue_pair_info *qpi;
+	u16 vsi_id, vsi_queue_id;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	vsi_id = qci->vsi_id;
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	for (i = 0; i < qci->num_queue_pairs; i++) {
+		qpi = &qci->qpair[i];
+		vsi_queue_id = qpi->txq.queue_id;
+		if ((qpi->txq.vsi_id != vsi_id) ||
+		    (qpi->rxq.vsi_id != vsi_id) ||
+		    (qpi->rxq.queue_id != vsi_queue_id) ||
+		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+					     &qpi->rxq) ||
+		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+					     &qpi->txq)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the irq to
+ * queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_irq_map_info *irqmap_info =
+	    (struct i40e_virtchnl_irq_map_info *)msg;
+	struct i40e_virtchnl_vector_map *map;
+	u16 vsi_id, vsi_queue_id, vector_id;
+	i40e_status aq_ret = 0;
+	unsigned long tempmap;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < irqmap_info->num_vectors; i++) {
+		map = &irqmap_info->vecmap[i];
+
+		vector_id = map->vector_id;
+		vsi_id = map->vsi_id;
+		/* validate msg params */
+		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
+		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		/* look out for invalid queue indexes */
+		tempmap = map->rxq_map;
+		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+		while (vsi_queue_id < I40E_MAX_VSI_QP) {
+			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+						      vsi_queue_id)) {
+				aq_ret = I40E_ERR_PARAM;
+				goto error_param;
+			}
+			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+						     vsi_queue_id + 1);
+		}
+
+		tempmap = map->txq_map;
+		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+		while (vsi_queue_id < I40E_MAX_VSI_QP) {
+			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+						      vsi_queue_id)) {
+				aq_ret = I40E_ERR_PARAM;
+				goto error_param;
+			}
+			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+						     vsi_queue_id + 1);
+		}
+
+		i40e_config_irq_link_list(vf, vsi_id, map);
+	}
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_queue_select *vqs =
+	    (struct i40e_virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	u16 vsi_id = vqs->vsi_id;
+	i40e_status aq_ret = 0;
+	unsigned long tempmap;
+	u16 queue_id;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!vqs->rx_queues && !vqs->tx_queues) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+				       I40E_QUEUE_CTRL_ENABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+				       I40E_QUEUE_CTRL_ENABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	/* Poll the status register to make sure that the
+	 * requested op was completed successfully
+	 */
+	udelay(10);
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+					   I40E_QUEUE_CTRL_ENABLECHECK)) {
+			dev_err(&pf->pdev->dev,
+				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
+				queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+					   I40E_QUEUE_CTRL_ENABLECHECK)) {
+			dev_err(&pf->pdev->dev,
+				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
+				queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to disable all or specific
+ * queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_queue_select *vqs =
+	    (struct i40e_virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	u16 vsi_id = vqs->vsi_id;
+	i40e_status aq_ret = 0;
+	unsigned long tempmap;
+	u16 queue_id;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!vqs->rx_queues && !vqs->tx_queues) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+				       I40E_QUEUE_CTRL_DISABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+				       I40E_QUEUE_CTRL_DISABLE);
+
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	/* Poll the status register to make sure that the
+	 * requested op was completed successfully
+	 */
+	udelay(10);
+
+	tempmap = vqs->rx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+					   I40E_QUEUE_CTRL_DISABLECHECK)) {
+			dev_err(&pf->pdev->dev,
+				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
+				queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+	tempmap = vqs->tx_queues;
+	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+	while (queue_id < I40E_MAX_VSI_QP) {
+		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+					   I40E_QUEUE_CTRL_DISABLECHECK)) {
+			dev_err(&pf->pdev->dev,
+				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
+				queue_id, vsi_id, vf->vf_id);
+		}
+		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+					 queue_id + 1);
+	}
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_queue_select *vqs =
+	    (struct i40e_virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_eth_stats stats;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+
+	memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	vsi = pf->vsi[vqs->vsi_id];
+	if (!vsi) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	i40e_update_eth_stats(vsi);
+	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+
+error_param:
+	/* send the response back to the vf */
+	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+				      (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_ether_addr_list *al =
+	    (struct i40e_virtchnl_ether_addr_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	u16 vsi_id = al->vsi_id;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < al->num_elements; i++) {
+		if (is_broadcast_ether_addr(al->list[i].addr) ||
+		    is_zero_ether_addr(al->list[i].addr)) {
+			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
+				al->list[i].addr);
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+	vsi = pf->vsi[vsi_id];
+
+	/* add new addresses to the list */
+	for (i = 0; i < al->num_elements; i++) {
+		struct i40e_mac_filter *f;
+
+		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+		/* only add a filter if the address isn't already present */
+		if (!f) {
+			if (i40e_is_vsi_in_vlan(vsi))
+				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
+							 true, false);
+			else
+				f = i40e_add_filter(vsi, al->list[i].addr, -1,
+						    true, false);
+		}
+
+		if (!f) {
+			dev_err(&pf->pdev->dev,
+				"Unable to add VF MAC filter\n");
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+	/* program the updated filter list */
+	if (i40e_sync_vsi_filters(vsi))
+		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_ether_addr_list *al =
+	    (struct i40e_virtchnl_ether_addr_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	u16 vsi_id = al->vsi_id;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	vsi = pf->vsi[vsi_id];
+
+	/* delete addresses from the list */
+	for (i = 0; i < al->num_elements; i++)
+		i40e_del_filter(vsi, al->list[i].addr,
+				I40E_VLAN_ANY, true, false);
+
+	/* program the updated filter list */
+	if (i40e_sync_vsi_filters(vsi))
+		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_vlan_filter_list *vfl =
+	    (struct i40e_virtchnl_vlan_filter_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	u16 vsi_id = vfl->vsi_id;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+			aq_ret = I40E_ERR_PARAM;
+			dev_err(&pf->pdev->dev,
+				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+			goto error_param;
+		}
+	}
+	vsi = pf->vsi[vsi_id];
+	if (vsi->info.pvid) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	i40e_vlan_stripping_enable(vsi);
+	for (i = 0; i < vfl->num_elements; i++) {
+		/* add new VLAN filter */
+		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+		if (ret)
+			dev_err(&pf->pdev->dev,
+				"Unable to add VF vlan filter %d, error %d\n",
+				vfl->vlan_id[i], ret);
+	}
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_vlan_filter_list *vfl =
+	    (struct i40e_virtchnl_vlan_filter_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	u16 vsi_id = vfl->vsi_id;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+	vsi = pf->vsi[vsi_id];
+	if (vsi->info.pvid) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+		if (ret)
+			dev_err(&pf->pdev->dev,
+				"Unable to delete VF vlan filter %d, error %d\n",
+				vfl->vlan_id[i], ret);
+	}
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_fcoe_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf for the fcoe msgs
+ **/
+static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	aq_ret = I40E_ERR_NOT_IMPLEMENTED;
+
+error_param:
+	/* send the response to the vf */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
+}
+
+/**
+ * i40e_vc_validate_vf_msg
+ * @vf: pointer to the vf info
+ * @v_opcode: operation code
+ * @v_retval: return value (unused)
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg
+ **/
+static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
+				   u32 v_retval, u8 *msg, u16 msglen)
+{
+	bool err_msg_format = false;
+	int valid_len;
+
+	/* Check if VF is disabled. */
+	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+		return I40E_ERR_PARAM;
+
+	/* Validate message length. */
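+	/* variable-length opcodes extend valid_len by one element size per entry */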
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		valid_len = sizeof(struct i40e_virtchnl_version_info);
+		break;
+	case I40E_VIRTCHNL_OP_RESET_VF:
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		valid_len = 0;
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		valid_len = sizeof(struct i40e_virtchnl_txq_info);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_vsi_queue_config_info *vqc =
+			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+			valid_len += (vqc->num_queue_pairs *
+				      sizeof(struct
+					     i40e_virtchnl_queue_pair_info));
+			if (vqc->num_queue_pairs == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_irq_map_info *vimi =
+			    (struct i40e_virtchnl_irq_map_info *)msg;
+			valid_len += (vimi->num_vectors *
+				      sizeof(struct i40e_virtchnl_vector_map));
+			if (vimi->num_vectors == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		valid_len = sizeof(struct i40e_virtchnl_queue_select);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_ether_addr_list *veal =
+			    (struct i40e_virtchnl_ether_addr_list *)msg;
+			valid_len += veal->num_elements *
+			    sizeof(struct i40e_virtchnl_ether_addr);
+			if (veal->num_elements == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_vlan_filter_list *vfl =
+			    (struct i40e_virtchnl_vlan_filter_list *)msg;
+			valid_len += vfl->num_elements * sizeof(u16);
+			if (vfl->num_elements == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+		break;
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		valid_len = sizeof(struct i40e_virtchnl_queue_select);
+		break;
+	/* These are always errors coming from the VF. */
+	case I40E_VIRTCHNL_OP_EVENT:
+	case I40E_VIRTCHNL_OP_UNKNOWN:
+	default:
+		return -EPERM;
+	}
+	/* few more checks */
+	if (valid_len != msglen || err_msg_format) {
+		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the pf structure
+ * @vf_id: source vf id
+ * @v_opcode: operation code
+ * @v_retval: return value (unused)
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process a request from a vf
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+			   u32 v_retval, u8 *msg, u16 msglen)
+{
+	struct i40e_vf *vf = &(pf->vf[vf_id]);
+	struct i40e_hw *hw = &pf->hw;
+	int ret;
+
+	pf->vf_aq_requests++;
+	/* perform basic checks on the msg */
+	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+
+	if (ret) {
+		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+		return ret;
+	}
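+	/* mark the VF active in its reset status register before handling the request */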
+	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		ret = i40e_vc_get_version_msg(vf);
+		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		ret = i40e_vc_get_vf_resources_msg(vf);
+		break;
+	case I40E_VIRTCHNL_OP_RESET_VF:
+		ret = i40e_vc_reset_vf_msg(vf);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_FCOE:
+		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_UNKNOWN:
+	default:
+		dev_err(&pf->pdev->dev,
+			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+					      I40E_ERR_NOT_IMPLEMENTED);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the pf structure
+ *
+ * called from the vflr irq handler to
+ * free up vf resources and state variables
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+	u32 reg, reg_idx, bit_idx, vf_id;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+
+	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+		return 0;
+
+	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
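+		/* each 32-bit GLGEN_VFLRSTAT register tracks the VFLR state of 32 VFs */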
+		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
+		vf = &pf->vf[vf_id];
+		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+		if (reg & (1 << bit_idx)) {
+			/* clear the bit in GLGEN_VFLRSTAT */
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+
+			if (i40e_reset_vf(vf, true))
+				dev_err(&pf->pdev->dev,
+					"Unable to reset the VF %d\n", vf_id);
+			/* free up vf resources to destroy vsi state */
+			i40e_free_vf_res(vf);
+
+			/* allocate new vf resources with the default state */
+			if (i40e_alloc_vf_res(vf))
+				dev_err(&pf->pdev->dev,
+					"Unable to allocate VF resources %d\n",
+					vf_id);
+
+			i40e_enable_vf_mappings(vf);
+		}
+	}
+
+	/* re-enable vflr interrupt cause */
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the pf structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+				 enum i40e_virtchnl_ops v_opcode,
+				 i40e_status v_retval, u8 *msg,
+				 u16 msglen)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf = pf->vf;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		/* Ignore return value on purpose - a given VF may fail, but
+		 * we need to keep going and send to all of them
+		 */
+		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+				       msg, msglen, NULL);
+		vf++;
+	}
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the pf structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+	pfe.event_data.link_event.link_status =
+	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
+
+	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the pf structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the vf structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+			       I40E_SUCCESS, (u8 *)&pfe,
+			       sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @mac: mac address
+ *
+ * program vf mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_mac_filter *f;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev,
+			"Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev,
+			"Uninitialized VF %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	if (!is_valid_ether_addr(mac)) {
+		dev_err(&pf->pdev->dev,
+			"Invalid VF ethernet address\n");
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	/* delete the temporary mac address */
+	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+
+	/* add the new mac address */
+	f = i40e_add_filter(vsi, mac, 0, true, false);
+	if (!f) {
+		dev_err(&pf->pdev->dev,
+			"Unable to add VF ucast filter\n");
+		ret = -ENOMEM;
+		goto error_param;
+	}
+
+	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
+	/* program mac filter */
+	if (i40e_sync_vsi_filters(vsi)) {
+		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+		ret = -EIO;
+		goto error_param;
+	}
+	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+	ret = 0;
+
+error_param:
+	return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @vlan_id: vlan id to be set
+ * @qos: priority setting
+ *
+ * program vf vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+
+	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+
+	if (vsi->info.pvid) {
+		/* kill old VLAN */
+		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+					       VLAN_VID_MASK));
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "remove VLAN failed, ret=%d, aq_err=%d\n",
+				 ret, pf->hw.aq.asq_last_status);
+		}
+	}
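+	/* qos occupies the bits directly above the 12-bit vlan id in the pvid */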
+	if (vlan_id || qos)
+		ret = i40e_vsi_add_pvid(vsi,
+				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+	else
+		i40e_vlan_stripping_disable(vsi);
+
+	if (vlan_id) {
+		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+			 vlan_id, qos, vf_id);
+
+		/* add new VLAN filter */
+		ret = i40e_vsi_add_vlan(vsi, vlan_id);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+				 vsi->back->hw.aq.asq_last_status);
+			goto error_pvid;
+		}
+	}
+
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
+		goto error_pvid;
+	}
+	ret = 0;
+
+error_pvid:
+	return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @tx_rate: tx rate
+ *
+ * configure vf tx rate
+ **/
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @ivi: vf configuration structure
+ *
+ * return vf configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_mac_filter *f, *ftmp;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	/* first vsi is always the LAN vsi */
+	vsi = pf->vsi[vf->lan_vsi_index];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	ivi->vf = vf_id;
+
+	/* first entry of the list is the default ethernet address */
+	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
+		break;
+	}
+
+	ivi->tx_rate = 0;
+	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+		   I40E_VLAN_PRIORITY_SHIFT;
+	ret = 0;
+
+error_param:
+	return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 0000000..360382c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_MACVLAN_FILTERS 256
+#define I40E_MAX_VLAN_FILTERS 256
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED	3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED	10
+
+#define I40E_VLAN_PRIORITY_SHIFT	12
+#define I40E_VLAN_MASK			0xFFF
+#define I40E_PRIORITY_MASK		0x7000
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+	I40E_QUEUE_CTRL_UNKNOWN = 0,
+	I40E_QUEUE_CTRL_ENABLE,
+	I40E_QUEUE_CTRL_ENABLECHECK,
+	I40E_QUEUE_CTRL_DISABLE,
+	I40E_QUEUE_CTRL_DISABLECHECK,
+	I40E_QUEUE_CTRL_FASTDISABLE,
+	I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+	I40E_VF_STAT_INIT = 0,
+	I40E_VF_STAT_ACTIVE,
+	I40E_VF_STAT_FCOEENA,
+	I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+	I40E_VIRTCHNL_VF_CAP_L2,
+};
+
+/* VF information structure */
+struct i40e_vf {
+	struct i40e_pf *pf;
+
+	/* vf id in the pf space */
+	u16 vf_id;
+	/* all vf vsis connect to the same parent */
+	enum i40e_switch_element_types parent_type;
+
+	/* vf Port Extender (PE) stag if used */
+	u16 stag;
+
+	struct i40e_virtchnl_ether_addr default_lan_addr;
+	struct i40e_virtchnl_ether_addr default_fcoe_addr;
+
+	/* VSI indices - actual VSI pointers are maintained in the PF structure
+	 * When assigned, these will be non-zero, because VSI 0 is always
+	 * the main LAN VSI for the PF.
+	 */
+	u8 lan_vsi_index;	/* index into PF struct */
+	u8 lan_vsi_id;		/* ID as used by firmware */
+
+	u8 num_queue_pairs;	/* num of qps assigned to vf vsis */
+	u64 num_mdd_events;	/* num of mdd events detected */
+	u64 num_invalid_msgs;	/* num of malformed or invalid msgs detected */
+	u64 num_valid_msgs;	/* num of valid msgs detected */
+
+	unsigned long vf_caps;	/* vf's adv. capabilities */
+	unsigned long vf_states;	/* vf's runtime states */
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+			   u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* vf configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi);
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 79b5835..47c2d10 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -719,6 +719,10 @@
 	u32 ctrl_ext;
 	u32 mdic;
 
+	/* Extra read required for some PHYs on i354 */
+	if (hw->mac.type == e1000_i354)
+		igb_get_phy_id(hw);
+
 	/* For SGMII PHYs, we try the list of possible addresses until
 	 * we find one that works.  For non-SGMII PHYs
 	 * (e.g. integrated copper PHYs), an address of 1 should
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f0dfd41..298f0ed 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -712,6 +712,7 @@
 static s32 igb_set_default_fc(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
+	u16 lan_offset;
 	u16 nvm_data;
 
 	/* Read and store word 0x0F of the EEPROM. This word contains bits
@@ -722,7 +723,14 @@
 	 * control setting, then the variable hw->fc will
 	 * be initialized based on a value in the EEPROM.
 	 */
-	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+	if (hw->mac.type == e1000_i350) {
+		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+					   + lan_offset, 1, &nvm_data);
+	} else {
+		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+					   1, &nvm_data);
+	}
 
 	if (ret_val) {
 		hw_dbg("NVM Read Error\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0e1b973..e8649ab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -160,6 +160,13 @@
 	bool autoneg = false;
 	bool link_up;
 
+	/* SFP type is needed for get_link_capabilities */
+	if (hw->phy.media_type & (ixgbe_media_type_fiber |
+				  ixgbe_media_type_fiber_qsfp)) {
+		if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+				hw->phy.ops.identify_sfp(hw);
+	}
+
 	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
 
 	/* set the supported link speeds */
@@ -186,6 +193,11 @@
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
 		if (supported_link & IXGBE_LINK_SPEED_100_FULL)
 			ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+		if (hw->phy.multispeed_fiber && !autoneg) {
+			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+				ecmd->advertising = ADVERTISED_10000baseT_Full;
+		}
 	}
 
 	if (autoneg) {
@@ -314,6 +326,14 @@
 		if (ecmd->advertising & ~ecmd->supported)
 			return -EINVAL;
 
+		/* only allow one speed at a time if no autoneg */
+		if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
+			if (ecmd->advertising ==
+			    (ADVERTISED_10000baseT_Full |
+			     ADVERTISED_1000baseT_Full))
+				return -EINVAL;
+		}
+
 		old = hw->phy.autoneg_advertised;
 		advertised = 0;
 		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -1805,6 +1825,10 @@
 	unsigned int size = 1024;
 	netdev_tx_t tx_ret_val;
 	struct sk_buff *skb;
+	u32 flags_orig = adapter->flags;
+
+	/* DCB can modify the frames on Tx */
+	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 
 	/* allocate test skb */
 	skb = alloc_skb(size, GFP_KERNEL);
@@ -1857,6 +1881,7 @@
 
 	/* free the original skb */
 	kfree_skb(skb);
+	adapter->flags = flags_orig;
 
 	return ret_val;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7aba452..0ade0cd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3571,7 +3571,7 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
-	u32 rxctrl;
+	u32 rxctrl, rfctl;
 
 	/* disable receives while setting up the descriptors */
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -3580,6 +3580,13 @@
 	ixgbe_setup_psrtype(adapter);
 	ixgbe_setup_rdrxctl(adapter);
 
+	/* RSC Setup */
+	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+		rfctl |= IXGBE_RFCTL_RSC_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+
 	/* Program registers for the distribution of queues */
 	ixgbe_setup_mrqc(adapter);
 
@@ -5993,8 +6000,16 @@
 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 
 	speed = hw->phy.autoneg_advertised;
-	if ((!speed) && (hw->mac.ops.get_link_capabilities))
+	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
 		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+
+		/* setup the highest link when no autoneg */
+		if (!autoneg) {
+			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+				speed = IXGBE_LINK_SPEED_10GB_FULL;
+		}
+	}
+
 	if (hw->mac.ops.setup_link)
 		hw->mac.ops.setup_link(hw, speed, true);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6442cf8..10775cb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1861,6 +1861,7 @@
 #define IXGBE_RFCTL_ISCSI_DIS       0x00000001
 #define IXGBE_RFCTL_ISCSI_DWC_MASK  0x0000003E
 #define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS		0x00000020
 #define IXGBE_RFCTL_NFSW_DIS        0x00000040
 #define IXGBE_RFCTL_NFSR_DIS        0x00000080
 #define IXGBE_RFCTL_NFS_VER_MASK    0x00000300
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 270e65f..a36fa80 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -996,14 +996,14 @@
 	 * that handles the Done Finished
 	 * Ovr and Und Events */
 	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
-			IRQF_DISABLED, "Korina ethernet Rx", dev);
+			0, "Korina ethernet Rx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
 		    dev->name, lp->rx_irq);
 		goto err_release;
 	}
 	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
-			IRQF_DISABLED, "Korina ethernet Tx", dev);
+			0, "Korina ethernet Tx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
 		    dev->name, lp->tx_irq);
@@ -1012,7 +1012,7 @@
 
 	/* Install handler for overrun error. */
 	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
-			IRQF_DISABLED, "Ethernet Overflow", dev);
+			0, "Ethernet Overflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
 		    dev->name, lp->ovr_irq);
@@ -1021,7 +1021,7 @@
 
 	/* Install handler for underflow error. */
 	ret = request_irq(lp->und_irq, korina_und_interrupt,
-			IRQF_DISABLED, "Ethernet Underflow", dev);
+			0, "Ethernet Underflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
 		    dev->name, lp->und_irq);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index bfdb0686..6a6c1f7 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -282,8 +282,7 @@
 
 		if (IS_TX(i)) {
 			ltq_dma_alloc_tx(&ch->dma);
-			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
-				"etop_tx", priv);
+			request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
 		} else if (IS_RX(i)) {
 			ltq_dma_alloc_rx(&ch->dma);
 			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -291,8 +290,7 @@
 				if (ltq_etop_alloc_skb(ch))
 					return -ENOMEM;
 			ch->dma.desc = 0;
-			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
-				"etop_rx", priv);
+			request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
 		}
 		ch->dma.irq = irq;
 	}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 4ae0c74..fff6246 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1123,8 +1123,7 @@
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 	int err;
 
-	err = request_irq(dev->irq, pxa168_eth_int_handler,
-			  IRQF_DISABLED, dev->name, dev);
+	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
 	if (err) {
 		dev_err(&dev->dev, "can't assign irq\n");
 		return -EAGAIN;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a28cd80..0c75098 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -53,9 +53,11 @@
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		priv->tx_cq[i].moder_cnt = priv->tx_frames;
 		priv->tx_cq[i].moder_time = priv->tx_usecs;
-		err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
-		if (err)
-			return err;
+		if (priv->port_up) {
+			err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+			if (err)
+				return err;
+		}
 	}
 
 	if (priv->adaptive_rx_coal)
@@ -65,9 +67,11 @@
 		priv->rx_cq[i].moder_cnt = priv->rx_frames;
 		priv->rx_cq[i].moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
-		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
-		if (err)
-			return err;
+		if (priv->port_up) {
+			err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+			if (err)
+				return err;
+		}
 	}
 
 	return err;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0fba153..075f4e2 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -915,7 +915,7 @@
 	struct ks_net *ks = netdev_priv(netdev);
 	int err;
 
-#define	KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
+#define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
 	/* lock the card, even if we may not actually do anything
 	 * else at the moment.
 	 */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index c20766c..79257f7 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -83,8 +83,7 @@
 {
 	int retval;
 
-	retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
-				"sonic", dev);
+	retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
 	if (retval) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
 				dev->name, dev->irq);
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index c2e0256..4da172a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -95,8 +95,7 @@
 {
 	int retval;
 
-	retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
-				"sonic", dev);
+	retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
 	if (retval) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
 		       dev->name, dev->irq);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c498181..5b65356 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1219,7 +1219,7 @@
 	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
 		 dev->name);
 
-	ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
+	ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
 			  mac->tx_irq_name, mac->tx);
 	if (ret) {
 		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1230,7 +1230,7 @@
 	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
 		 dev->name);
 
-	ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
+	ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
 			  mac->rx_irq_name, mac->rx);
 	if (ret) {
 		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 88349b8..81bf836 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -430,7 +430,7 @@
 	u8 diag_test;
 	u8 num_msix;
 	u8 nic_mode;
-	char diag_cnt;
+	int diag_cnt;
 
 	u16 max_uc_count;
 	u16 port_type;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 652cc13..392b9bd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1561,6 +1561,7 @@
 {
 	int err;
 
+	adapter->need_fw_reset = 0;
 	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
 	qlcnic_83xx_enable_mbx_interrupt(adapter);
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f87f2c..3397cee 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4231,6 +4231,7 @@
 	case RTL_GIGA_MAC_VER_23:
 	case RTL_GIGA_MAC_VER_24:
 	case RTL_GIGA_MAC_VER_34:
+	case RTL_GIGA_MAC_VER_35:
 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 		break;
 	case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 8b71525..0889212 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -7,7 +7,7 @@
 	select I2C_ALGOBIT
 	select PTP_1588_CLOCK
 	---help---
-	  This driver supports 10-gigabit Ethernet cards based on
+	  This driver supports 10/40-gigabit Ethernet cards based on
 	  the Solarflare SFC4000, SFC9000-family and SFC9100-family
 	  controllers.
 
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 5f42313..9f18ae9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -94,7 +94,7 @@
 	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
 }
 
-static int efx_ef10_init_capabilities(struct efx_nic *efx)
+static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -107,16 +107,27 @@
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
 		return rc;
+	if (outlen < sizeof(outbuf)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "unable to read datapath firmware capabilities\n");
+		return -EIO;
+	}
 
-	if (outlen >= sizeof(outbuf)) {
-		nic_data->datapath_caps =
-			MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
-		if (!(nic_data->datapath_caps &
-		     (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
-			netif_err(efx, drv, efx->net_dev,
-				  "Capabilities don't indicate TSO support.\n");
-			return -ENODEV;
-		}
+	nic_data->datapath_caps =
+		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+
+	if (!(nic_data->datapath_caps &
+	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+		netif_err(efx, drv, efx->net_dev,
+			  "current firmware does not support TSO\n");
+		return -ENODEV;
+	}
+
+	if (!(nic_data->datapath_caps &
+	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+		netif_err(efx, probe, efx->net_dev,
+			  "current firmware does not support an RX prefix\n");
+		return -ENODEV;
 	}
 
 	return 0;
@@ -217,21 +228,13 @@
 	if (rc)
 		goto fail3;
 
-	rc = efx_ef10_init_capabilities(efx);
+	rc = efx_ef10_init_datapath_caps(efx);
 	if (rc < 0)
 		goto fail3;
 
 	efx->rx_packet_len_offset =
 		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
 
-	if (!(nic_data->datapath_caps &
-	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
-		netif_err(efx, probe, efx->net_dev,
-			  "current firmware does not support an RX prefix\n");
-		rc = -ENODEV;
-		goto fail3;
-	}
-
 	rc = efx_mcdi_port_get_number(efx);
 	if (rc < 0)
 		goto fail3;
@@ -260,8 +263,6 @@
 	if (rc)
 		goto fail3;
 
-	efx_ptp_probe(efx);
-
 	return 0;
 
 fail3:
@@ -342,6 +343,13 @@
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	int rc;
 
+	if (nic_data->must_check_datapath_caps) {
+		rc = efx_ef10_init_datapath_caps(efx);
+		if (rc)
+			return rc;
+		nic_data->must_check_datapath_caps = false;
+	}
+
 	if (nic_data->must_realloc_vis) {
 		/* We cannot let the number of VIs change now */
 		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
@@ -710,6 +718,14 @@
 	nic_data->must_restore_filters = true;
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
+	/* The datapath firmware might have been changed */
+	nic_data->must_check_datapath_caps = true;
+
+	/* MAC statistics have been cleared on the NIC; clear the local
+	 * statistic that we update with efx_update_diff_stat().
+	 */
+	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+
 	return -EIO;
 }
 
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 8d33da6..7b6be61 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -556,6 +556,7 @@
 		case 100:   caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;   break;
 		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;  break;
 		case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+		case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
 		default:    return -EINVAL;
 		}
 	} else {
@@ -841,6 +842,7 @@
 	[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
 	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
 	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
 };
 
 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4b1e188..fda29d3 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -400,6 +400,8 @@
  * @rx_rss_context: Firmware handle for our RSS context
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ *	after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
  *	%MC_CMD_GET_CAPABILITIES response)
  */
@@ -413,6 +415,7 @@
 	u32 rx_rss_context;
 	u64 stats[EF10_STAT_COUNT];
 	bool workaround_35388;
+	bool must_check_datapath_caps;
 	u32 datapath_caps;
 };
 
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 370e13d..5730fe2 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -271,7 +271,7 @@
 #define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
 #define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)
 
-#define SMC_IRQ_FLAGS		(IRQF_DISABLED)
+#define SMC_IRQ_FLAGS		0
 
 #else
 
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ffa5c4a..5f9e79f 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1356,8 +1356,7 @@
 	smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
 	smsc9420_pci_flush_write(pd);
 
-	result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
-			     DRV_NAME, pd);
+	result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
 	if (result) {
 		smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
 		result = -ENODEV;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 949076f..13e6fff 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1734,7 +1734,8 @@
 	unsigned int data_len = skb->len - sh_len;
 	unsigned char *data = skb->data;
 	unsigned int ih_off, th_off, p_len;
-	unsigned int isum_seed, tsum_seed, id, seq;
+	unsigned int isum_seed, tsum_seed, seq;
+	unsigned int uninitialized_var(id);
 	int is_ipv6;
 	long f_id = -1;    /* id of the current fragment */
 	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
@@ -1781,7 +1782,7 @@
 		} else {
 			ih = (struct iphdr *)(buf + ih_off);
 			ih->tot_len = htons(sh_len + p_len - ih_off);
-			ih->id = htons(id);
+			ih->id = htons(id++);
 			ih->check = csum_long(isum_seed + ih->tot_len +
 					      ih->id) ^ 0xffff;
 		}
@@ -1818,7 +1819,6 @@
 			slot++;
 		}
 
-		id++;
 		seq += p_len;
 
 		/* The last segment may be less than gso_size. */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9c805e0..f7f2ef4 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1726,7 +1726,7 @@
 		goto fail_alloc_irq;
 	}
 	result = request_irq(card->irq, gelic_card_interrupt,
-			     IRQF_DISABLED, netdev->name, card);
+			     0, netdev->name, card);
 
 	if (result) {
 		dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 510b9c8..31bcb98 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1488,7 +1488,7 @@
 toshoboe_close (struct pci_dev *pci_dev)
 {
   int i;
-  struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+  struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
 
   IRDA_DEBUG (4, "%s()\n", __func__);
 
@@ -1696,7 +1696,7 @@
 static int
 toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
 {
-  struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+  struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
   unsigned long flags;
   int i = 10;
 
@@ -1725,7 +1725,7 @@
 static int
 toshoboe_wakeup (struct pci_dev *pci_dev)
 {
-  struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+  struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
   unsigned long flags;
 
   IRDA_DEBUG (4, "%s()\n", __func__);
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index f07c340..3f138ca 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -191,8 +191,8 @@
 		goto error;
 
 	ret = 0;
-	error:
-		return ret;
+error:
+	return ret;
 }
 
 /* Setup a communication between mcs7780 and agilent chip. */
@@ -501,8 +501,11 @@
 		return 0;
 
 	mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!mcs->rx_urb)
+	if (!mcs->rx_urb) {
+		usb_free_urb(mcs->tx_urb);
+		mcs->tx_urb = NULL;
 		return 0;
+	}
 
 	return 1;
 }
@@ -643,9 +646,9 @@
 	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
 
 	mcs->speed = mcs->new_speed;
-	error:
-		mcs->new_speed = 0;
-		return ret;
+error:
+	mcs->new_speed = 0;
+	return ret;
 }
 
 /* Ioctl calls not supported at this time.  Can be an area of future work. */
@@ -738,17 +741,20 @@
 
 	ret = mcs_receive_start(mcs);
 	if (ret)
-		goto error3;
+		goto error4;
 
 	netif_start_queue(netdev);
 	return 0;
 
-	error3:
-		irlap_close(mcs->irlap);
-	error2:
-		kfree_skb(mcs->rx_buff.skb);
-	error1:
-		return ret;
+error4:
+	usb_free_urb(mcs->rx_urb);
+	usb_free_urb(mcs->tx_urb);
+error3:
+	irlap_close(mcs->irlap);
+error2:
+	kfree_skb(mcs->rx_buff.skb);
+error1:
+	return ret;
 }
 
 /* Receive callback function.  */
@@ -946,11 +952,11 @@
 	usb_set_intfdata(intf, mcs);
 	return 0;
 
-	error2:
-		free_netdev(ndev);
+error2:
+	free_netdev(ndev);
 
-	error1:
-		return ret;
+error1:
+	return ret;
 }
 
 /* The current device is removed, the USB layer tells us to shut down. */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 5f47584..c5bd58b 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -543,7 +543,7 @@
 	int		crclen, len = 0;
 	struct sk_buff	*skb;
 	int		ret = 0;
-	struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
+	struct net_device *ndev = pci_get_drvdata(r->pdev);
 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 
 	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index fcbf680..a17d85a 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -146,6 +146,7 @@
 
 static void loopback_dev_free(struct net_device *dev)
 {
+	dev_net(dev)->loopback_dev = NULL;
 	free_percpu(dev->lstats);
 	free_netdev(dev);
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 64dfaa3..9bf46bd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -118,8 +118,6 @@
 				 const struct ethhdr *eth, bool local)
 {
 	struct net_device *dev = vlan->dev;
-	if (!skb)
-		return NET_RX_DROP;
 
 	if (local)
 		return vlan->forward(dev, skb);
@@ -171,9 +169,13 @@
 			hash = mc_hash(vlan, eth->h_dest);
 			if (!test_bit(hash, vlan->mc_filter))
 				continue;
+
+			err = NET_RX_DROP;
 			nskb = skb_clone(skb, GFP_ATOMIC);
-			err = macvlan_broadcast_one(nskb, vlan, eth,
-					 mode == MACVLAN_MODE_BRIDGE);
+			if (likely(nskb))
+				err = macvlan_broadcast_one(
+					nskb, vlan, eth,
+					mode == MACVLAN_MODE_BRIDGE);
 			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
 					 err == NET_RX_SUCCESS, 1);
 		}
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index db472ff..313a037 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -30,9 +30,9 @@
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 /* Cicada Extended Control Register 1 */
 #define MII_CIS8201_EXT_CON1           0x17
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a639de8..807815f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1641,11 +1641,11 @@
 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file, false);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_free_flow;
 
 		err = register_netdevice(tun->dev);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_detach;
 
 		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1689,7 +1689,12 @@
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
 
- err_free_dev:
+err_detach:
+	tun_detach_all(dev);
+err_free_flow:
+	tun_flow_uninit(tun);
+	security_tun_dev_free_security(tun->security);
+err_free_dev:
 	free_netdev(dev);
 	return err;
 }
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 03ad4dc..2023f3e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -33,7 +33,7 @@
 #include <linux/usb/usbnet.h>
 
 
-#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST)
 
 static int is_rndis(struct usb_interface_descriptor *desc)
 {
@@ -69,8 +69,7 @@
 	0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
 };
 
-/*
- * probes control interface, claims data interface, collects the bulk
+/* probes control interface, claims data interface, collects the bulk
  * endpoints, activates data interface (if needed), maybe sets MTU.
  * all pure cdc, except for certain firmware workarounds, and knowing
  * that rndis uses one different rule.
@@ -88,7 +87,7 @@
 	struct usb_cdc_mdlm_desc	*desc = NULL;
 	struct usb_cdc_mdlm_detail_desc *detail = NULL;
 
-	if (sizeof dev->data < sizeof *info)
+	if (sizeof(dev->data) < sizeof(*info))
 		return -EDOM;
 
 	/* expect strict spec conformance for the descriptors, but
@@ -126,10 +125,10 @@
 		 is_activesync(&intf->cur_altsetting->desc) ||
 		 is_wireless_rndis(&intf->cur_altsetting->desc));
 
-	memset(info, 0, sizeof *info);
+	memset(info, 0, sizeof(*info));
 	info->control = intf;
 	while (len > 3) {
-		if (buf [1] != USB_DT_CS_INTERFACE)
+		if (buf[1] != USB_DT_CS_INTERFACE)
 			goto next_desc;
 
 		/* use bDescriptorSubType to identify the CDC descriptors.
@@ -139,14 +138,14 @@
 		 * in favor of a complicated OID-based RPC scheme doing what
 		 * CDC Ethernet achieves with a simple descriptor.
 		 */
-		switch (buf [2]) {
+		switch (buf[2]) {
 		case USB_CDC_HEADER_TYPE:
 			if (info->header) {
 				dev_dbg(&intf->dev, "extra CDC header\n");
 				goto bad_desc;
 			}
 			info->header = (void *) buf;
-			if (info->header->bLength != sizeof *info->header) {
+			if (info->header->bLength != sizeof(*info->header)) {
 				dev_dbg(&intf->dev, "CDC header len %u\n",
 					info->header->bLength);
 				goto bad_desc;
@@ -175,7 +174,7 @@
 				goto bad_desc;
 			}
 			info->u = (void *) buf;
-			if (info->u->bLength != sizeof *info->u) {
+			if (info->u->bLength != sizeof(*info->u)) {
 				dev_dbg(&intf->dev, "CDC union len %u\n",
 					info->u->bLength);
 				goto bad_desc;
@@ -233,7 +232,7 @@
 				goto bad_desc;
 			}
 			info->ether = (void *) buf;
-			if (info->ether->bLength != sizeof *info->ether) {
+			if (info->ether->bLength != sizeof(*info->ether)) {
 				dev_dbg(&intf->dev, "CDC ether len %u\n",
 					info->ether->bLength);
 				goto bad_desc;
@@ -274,8 +273,8 @@
 			break;
 		}
 next_desc:
-		len -= buf [0];	/* bLength */
-		buf += buf [0];
+		len -= buf[0];	/* bLength */
+		buf += buf[0];
 	}
 
 	/* Microsoft ActiveSync based and some regular RNDIS devices lack the
@@ -379,9 +378,7 @@
 }
 EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
 
-/*-------------------------------------------------------------------------
- *
- * Communications Device Class, Ethernet Control model
+/* Communications Device Class, Ethernet Control model
  *
  * Takes two interfaces.  The DATA interface is inactive till an altsetting
  * is selected.  Configuration data includes class descriptors.  There's
@@ -389,8 +386,7 @@
  *
  * This should interop with whatever the 2.4 "CDCEther.c" driver
  * (by Brad Hards) talked with, with more functionality.
- *
- *-------------------------------------------------------------------------*/
+ */
 
 static void dumpspeed(struct usbnet *dev, __le32 *speeds)
 {
@@ -404,7 +400,7 @@
 {
 	struct usb_cdc_notification	*event;
 
-	if (urb->actual_length < sizeof *event)
+	if (urb->actual_length < sizeof(*event))
 		return;
 
 	/* SPEED_CHANGE can get split into two 8-byte packets */
@@ -423,7 +419,7 @@
 	case USB_CDC_NOTIFY_SPEED_CHANGE:	/* tx/rx rates */
 		netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
 			  urb->actual_length);
-		if (urb->actual_length != (sizeof *event + 8))
+		if (urb->actual_length != (sizeof(*event) + 8))
 			set_bit(EVENT_STS_SPLIT, &dev->flags);
 		else
 			dumpspeed(dev, (__le32 *) &event[1]);
@@ -469,7 +465,6 @@
 static const struct driver_info	cdc_info = {
 	.description =	"CDC Ethernet Device",
 	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT,
-	// .check_connect = cdc_check_connect,
 	.bind =		usbnet_cdc_bind,
 	.unbind =	usbnet_cdc_unbind,
 	.status =	usbnet_cdc_status,
@@ -493,9 +488,8 @@
 #define DELL_VENDOR_ID		0x413C
 #define REALTEK_VENDOR_ID	0x0bda
 
-static const struct usb_device_id	products [] = {
-/*
- * BLACKLIST !!
+static const struct usb_device_id	products[] = {
+/* BLACKLIST !!
  *
  * First blacklist any products that are egregiously nonconformant
  * with the CDC Ethernet specs.  Minor braindamage we cope with; when
@@ -542,7 +536,7 @@
 	.driver_info		= 0,
 }, {
 	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
-	          | USB_DEVICE_ID_MATCH_DEVICE,
+			  | USB_DEVICE_ID_MATCH_DEVICE,
 	.idVendor		= 0x04DD,
 	.idProduct		= 0x8007,	/* C-700 */
 	ZAURUS_MASTER_INTERFACE,
@@ -659,8 +653,7 @@
 	.driver_info = 0,
 },
 
-/*
- * WHITELIST!!!
+/* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
  * We match the main interface, ignoring the optional device
@@ -672,60 +665,40 @@
  */
 {
 	/* ZTE (Vodafone) K3805-Z */
-	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
-		 | USB_DEVICE_ID_MATCH_PRODUCT
-		 | USB_DEVICE_ID_MATCH_INT_INFO,
-	.idVendor               = ZTE_VENDOR_ID,
-	.idProduct		= 0x1003,
-	.bInterfaceClass	= USB_CLASS_COMM,
-	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
-	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
 	/* ZTE (Vodafone) K3806-Z */
-	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
-		 | USB_DEVICE_ID_MATCH_PRODUCT
-		 | USB_DEVICE_ID_MATCH_INT_INFO,
-	.idVendor               = ZTE_VENDOR_ID,
-	.idProduct		= 0x1015,
-	.bInterfaceClass	= USB_CLASS_COMM,
-	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
-	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
 	/* ZTE (Vodafone) K4510-Z */
-	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
-		 | USB_DEVICE_ID_MATCH_PRODUCT
-		 | USB_DEVICE_ID_MATCH_INT_INFO,
-	.idVendor               = ZTE_VENDOR_ID,
-	.idProduct		= 0x1173,
-	.bInterfaceClass	= USB_CLASS_COMM,
-	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
-	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
 	/* ZTE (Vodafone) K3770-Z */
-	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
-		 | USB_DEVICE_ID_MATCH_PRODUCT
-		 | USB_DEVICE_ID_MATCH_INT_INFO,
-	.idVendor               = ZTE_VENDOR_ID,
-	.idProduct		= 0x1177,
-	.bInterfaceClass	= USB_CLASS_COMM,
-	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
-	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
 	/* ZTE (Vodafone) K3772-Z */
-	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
-		 | USB_DEVICE_ID_MATCH_PRODUCT
-		 | USB_DEVICE_ID_MATCH_INT_INFO,
-	.idVendor               = ZTE_VENDOR_ID,
-	.idProduct		= 0x1181,
-	.bInterfaceClass	= USB_CLASS_COMM,
-	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
-	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
+	/* Telit modules */
+	USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = (kernel_ulong_t) &wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
@@ -736,15 +709,11 @@
 
 }, {
 	/* Various Huawei modems with a network port like the UMG1831 */
-	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
-		 | USB_DEVICE_ID_MATCH_INT_INFO,
-	.idVendor               = HUAWEI_VENDOR_ID,
-	.bInterfaceClass	= USB_CLASS_COMM,
-	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
-	.bInterfaceProtocol	= 255,
+	USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET, 255),
 	.driver_info = (unsigned long)&wwan_info,
 },
-	{ },		// END
+	{ },		/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3a81315..6312332 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -518,6 +518,135 @@
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
+	{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+	{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7101, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7101, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x7101, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x7102, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7102, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x7102, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x8000, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x8001, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9000, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9003, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9005, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x900a, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x900b, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x900c, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x900c, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x900c, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x900d, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x900f, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x900f, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x900f, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9010, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9010, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9011, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9025, 4)},	/* Alcatel-sbell ASB TL131 TDD LTE  (China Mobile) */
+	{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9032, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9035, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9036, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9037, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9038, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x903b, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x903c, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x903d, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x903e, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9043, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9046, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9046, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9046, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9047, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9047, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9047, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9050, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9052, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9053, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9053, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9054, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9054, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9056, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 9)},
+	{QMI_FIXED_INTF(0x05c6, 0x9064, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9065, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9065, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9066, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9066, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9067, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9070, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9070, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9075, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9078, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
 	{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
@@ -612,7 +741,6 @@
 	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
 	{QMI_GOBI_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x920d)},	/* Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9245)},	/* Samsung Gobi 2000 Modem device (VL176) */
 	{QMI_GOBI_DEVICE(0x03f0, 0x251d)},	/* HP Gobi 2000 Modem device (VP412) */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bf64b41..d1292fe 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -564,7 +564,7 @@
 	struct net_device *dev;
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = sk->sk_family;
-	u16 port = htons(inet_sk(sk)->inet_sport);
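+	/* inet_sport is already big-endian (__be16); converting it again was wrong */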
+	__be16 port = inet_sk(sk)->inet_sport;
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -581,7 +581,7 @@
 	struct net_device *dev;
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = sk->sk_family;
-	u16 port = htons(inet_sk(sk)->inet_sport);
+	__be16 port = inet_sk(sk)->inet_sport;
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -2021,7 +2021,8 @@
 };
 
 /* Calls the ndo_add_vxlan_port of the caller in order to
- * supply the listening VXLAN udp ports.
+ * supply the listening VXLAN udp ports. Callers are expected
+ * to implement ndo_add_vxlan_port.
  */
 void vxlan_get_rx_port(struct net_device *dev)
 {
@@ -2029,16 +2030,13 @@
 	struct net *net = dev_net(dev);
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	sa_family_t sa_family;
-	u16 port;
-	int i;
-
-	if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
-		return;
+	__be16 port;
+	unsigned int i;
 
 	spin_lock(&vn->sock_lock);
 	for (i = 0; i < PORT_HASH_SIZE; ++i) {
-		hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
-			port = htons(inet_sk(vs->sock->sk)->inet_sport);
+		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+			port = inet_sk(vs->sock->sk)->inet_sport;
 			sa_family = vs->sock->sk->sk_family;
 			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
 							    port);
@@ -2492,15 +2490,19 @@
 
 	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
 
-	/* create an fdb entry for default destination */
-	err = vxlan_fdb_create(vxlan, all_zeros_mac,
-			       &vxlan->default_dst.remote_ip,
-			       NUD_REACHABLE|NUD_PERMANENT,
-			       NLM_F_EXCL|NLM_F_CREATE,
-			       vxlan->dst_port, vxlan->default_dst.remote_vni,
-			       vxlan->default_dst.remote_ifindex, NTF_SELF);
-	if (err)
-		return err;
+	/* create an fdb entry for a valid default destination */
+	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+		err = vxlan_fdb_create(vxlan, all_zeros_mac,
+				       &vxlan->default_dst.remote_ip,
+				       NUD_REACHABLE|NUD_PERMANENT,
+				       NLM_F_EXCL|NLM_F_CREATE,
+				       vxlan->dst_port,
+				       vxlan->default_dst.remote_vni,
+				       vxlan->default_dst.remote_ifindex,
+				       NTF_SELF);
+		if (err)
+			return err;
+	}
 
 	err = register_netdevice(dev);
 	if (err) {
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fc8a0fa..b00a7e9 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -28,7 +28,7 @@
 
 config BRCMFMAC_SDIO
 	bool "SDIO bus interface support for FullMAC driver"
-	depends on MMC
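+	# when the driver is built-in, the MMC core must be built-in too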
+	depends on (MMC = y || MMC = BRCMFMAC)
 	depends on BRCMFMAC
 	select FW_LOADER
 	default y
@@ -39,7 +39,7 @@
 
 config BRCMFMAC_USB
 	bool "USB bus interface support for FullMAC driver"
-	depends on USB
+	depends on (USB = y || USB = BRCMFMAC)
 	depends on BRCMFMAC
 	select FW_LOADER
 	---help---
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index d063760..f5e6b48 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -40,7 +40,9 @@
 	struct cw1200_common	*core;
 	const struct cw1200_platform_data_spi *pdata;
 	spinlock_t		lock; /* Serialize all bus operations */
+	wait_queue_head_t       wq;
 	int claimed;
+	int irq_disabled;
 };
 
 #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -197,8 +199,11 @@
 {
 	unsigned long flags;
 
+	DECLARE_WAITQUEUE(wait, current);
+
 	might_sleep();
 
+	add_wait_queue(&self->wq, &wait);
 	spin_lock_irqsave(&self->lock, flags);
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -211,6 +216,7 @@
 	set_current_state(TASK_RUNNING);
 	self->claimed = 1;
 	spin_unlock_irqrestore(&self->lock, flags);
+	remove_wait_queue(&self->wq, &wait);
 
 	return;
 }
@@ -222,6 +228,8 @@
 	spin_lock_irqsave(&self->lock, flags);
 	self->claimed = 0;
 	spin_unlock_irqrestore(&self->lock, flags);
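+	/* wake any thread sleeping in cw1200_spi_lock() */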
+	wake_up(&self->wq);
+
 	return;
 }
 
@@ -230,6 +238,8 @@
 	struct hwbus_priv *self = dev_id;
 
 	if (self->core) {
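+		/* mask the IRQ here; cw1200_spi_irq_enable() re-arms it later */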
+		disable_irq_nosync(self->func->irq);
+		self->irq_disabled = 1;
 		cw1200_irq_handler(self->core);
 		return IRQ_HANDLED;
 	} else {
@@ -263,13 +273,22 @@
 
 static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
 {
-	int ret = 0;
-
 	pr_debug("SW IRQ unsubscribe\n");
 	disable_irq_wake(self->func->irq);
 	free_irq(self->func->irq, self);
 
-	return ret;
+	return 0;
+}
+
+static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
+{
+	/* Disables are handled by the interrupt handler */
+	if (enable && self->irq_disabled) {
+		enable_irq(self->func->irq);
+		self->irq_disabled = 0;
+	}
+
+	return 0;
 }
 
 static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -349,6 +368,7 @@
 	.unlock			= cw1200_spi_unlock,
 	.align_size		= cw1200_spi_align_size,
 	.power_mgmt		= cw1200_spi_pm,
+	.irq_enable		= cw1200_spi_irq_enable,
 };
 
 /* Probe Function to be called by SPI stack when device is discovered */
@@ -400,6 +420,8 @@
 
 	spi_set_drvdata(func, self);
 
+	init_waitqueue_head(&self->wq);
+
 	status = cw1200_spi_irq_subscribe(self);
 
 	status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index acdff0f..0b2061b 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -485,7 +485,7 @@
 
 	/* Enable interrupt signalling */
 	priv->hwbus_ops->lock(priv->hwbus_priv);
-	ret = __cw1200_irq_enable(priv, 1);
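+	/* '2' asks the hwbus layer to pass the enable through to hardware */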
+	ret = __cw1200_irq_enable(priv, 2);
 	priv->hwbus_ops->unlock(priv->hwbus_priv);
 	if (ret < 0)
 		goto unsubscribe;
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
index 8b2fc83..51dfb3a 100644
--- a/drivers/net/wireless/cw1200/hwbus.h
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -28,6 +28,7 @@
 	void (*unlock)(struct hwbus_priv *self);
 	size_t (*align_size)(struct hwbus_priv *self, size_t size);
 	int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
+	int (*irq_enable)(struct hwbus_priv *self, int enable);
 };
 
 #endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
index ff230b7..41bd761 100644
--- a/drivers/net/wireless/cw1200/hwio.c
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -273,6 +273,21 @@
 	u16 val16;
 	int ret;
 
+	/* We need to do this hack because the SPI layer can sleep on I/O
+	   and the general path involves I/O to the device in interrupt
+	   context.
+
+	   However, the initial enable call needs to go to the hardware.
+
+	   We don't worry about shutdown because we do a full reset which
+	   clears the interrupt enabled bits.
+	*/
+	if (priv->hwbus_ops->irq_enable) {
+		ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
+		if (ret || enable < 2)
+			return ret;
+	}
+
 	if (HIF_8601_SILICON == priv->hw_type) {
 		ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
 		if (ret < 0) {
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 95e6e61..88ce656 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -6659,19 +6659,20 @@
 		     rt2800_init_registers(rt2x00dev)))
 		return -EIO;
 
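+	/* wait for the BBP/RF to become ready before signalling the firmware */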
+	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
+		return -EIO;
+
 	/*
 	 * Send signal to firmware during boot time.
 	 */
 	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
 	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
-	if (rt2x00_is_usb(rt2x00dev)) {
+	if (rt2x00_is_usb(rt2x00dev))
 		rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
-		rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
-	}
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
 	msleep(1);
 
-	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
-		     rt2800_wait_bbp_ready(rt2x00dev)))
+	if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
 		return -EIO;
 
 	rt2800_init_bbp(rt2x00dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 841fb9d..9a6edb0 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -438,17 +438,16 @@
 		skb_queue_tail(&priv->rx_queue, skb);
 		usb_anchor_urb(entry, &priv->anchored);
 		ret = usb_submit_urb(entry, GFP_KERNEL);
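+		/* the anchor holds its own reference, so drop ours regardless of ret */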
+		usb_put_urb(entry);
 		if (ret) {
 			skb_unlink(skb, &priv->rx_queue);
 			usb_unanchor_urb(entry);
 			goto err;
 		}
-		usb_free_urb(entry);
 	}
 	return ret;
 
 err:
-	usb_free_urb(entry);
 	kfree_skb(skb);
 	usb_kill_anchored_urbs(&priv->anchored);
 	return ret;
@@ -956,8 +955,12 @@
 				  (RETRY_COUNT << 8  /* short retry limit */) |
 				  (RETRY_COUNT << 0  /* long retry limit */) |
 				  (7 << 21 /* MAX TX DMA */));
-		rtl8187_init_urbs(dev);
-		rtl8187b_init_status_urb(dev);
+		ret = rtl8187_init_urbs(dev);
+		if (ret)
+			goto rtl8187_start_exit;
+		ret = rtl8187b_init_status_urb(dev);
+		if (ret)
+			usb_kill_anchored_urbs(&priv->anchored);
 		goto rtl8187_start_exit;
 	}
 
@@ -966,7 +969,9 @@
 	rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
 	rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
 
-	rtl8187_init_urbs(dev);
+	ret = rtl8187_init_urbs(dev);
+	if (ret)
+		goto rtl8187_start_exit;
 
 	reg = RTL818X_RX_CONF_ONLYERLPKT |
 	      RTL818X_RX_CONF_RX_AUTORESETPHY |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 625c6f4..77fee1d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -406,7 +406,7 @@
 
 	init_waitqueue_head(&vif->wq);
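+	/* use "%s" so a '%' in the device name cannot act as a format specifier */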
 	vif->task = kthread_create(xenvif_kthread,
-				   (void *)vif, vif->dev->name);
+				   (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(vif->task)) {
 		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
 		err = PTR_ERR(vif->task);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 956130c..f3e591c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -212,6 +212,49 @@
 	return false;
 }
 
+struct xenvif_count_slot_state {
+	unsigned long copy_off;
+	bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+				     unsigned long offset, unsigned long size,
+				     struct xenvif_count_slot_state *state)
+{
+	unsigned count = 0;
+
+	offset &= ~PAGE_MASK;
+
+	while (size > 0) {
+		unsigned long bytes;
+
+		bytes = PAGE_SIZE - offset;
+
+		if (bytes > size)
+			bytes = size;
+
+		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+			count++;
+			state->copy_off = 0;
+		}
+
+		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+			bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+		state->copy_off += bytes;
+
+		offset += bytes;
+		size -= bytes;
+
+		if (offset == PAGE_SIZE)
+			offset = 0;
+
+		state->head = false;
+	}
+
+	return count;
+}
+
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
@@ -219,48 +262,39 @@
  */
 unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
+	struct xenvif_count_slot_state state;
 	unsigned int count;
-	int i, copy_off;
+	unsigned char *data;
+	unsigned i;
 
-	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+	state.head = true;
+	state.copy_off = 0;
 
-	copy_off = skb_headlen(skb) % PAGE_SIZE;
+	/* Slot for the first (partial) page of data. */
+	count = 1;
 
+	/* Need a slot for the GSO prefix for GSO extra data? */
 	if (skb_shinfo(skb)->gso_size)
 		count++;
 
+	data = skb->data;
+	while (data < skb_tail_pointer(skb)) {
+		unsigned long offset = offset_in_page(data);
+		unsigned long size = PAGE_SIZE - offset;
+
+		if (data + size > skb_tail_pointer(skb))
+			size = skb_tail_pointer(skb) - data;
+
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
+
+		data += size;
+	}
+
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-		unsigned long bytes;
 
-		offset &= ~PAGE_MASK;
-
-		while (size > 0) {
-			BUG_ON(offset >= PAGE_SIZE);
-			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
-			bytes = PAGE_SIZE - offset;
-
-			if (bytes > size)
-				bytes = size;
-
-			if (start_new_rx_buffer(copy_off, bytes, 0)) {
-				count++;
-				copy_off = 0;
-			}
-
-			if (copy_off + bytes > MAX_BUFFER_OFFSET)
-				bytes = MAX_BUFFER_OFFSET - copy_off;
-
-			copy_off += bytes;
-
-			offset += bytes;
-			size -= bytes;
-
-			if (offset == PAGE_SIZE)
-				offset = 0;
-		}
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
 	}
 	return count;
 }
diff --git a/drivers/of/base.c b/drivers/of/base.c
index e486e41..865d3f6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1176,65 +1176,10 @@
 }
 EXPORT_SYMBOL_GPL(of_property_count_strings);
 
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- *         the table
- *
- * Returns the device_node pointer with refcount incremented.  Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
-				     const char *phandle_name, int index)
-{
-	const __be32 *phandle;
-	int size;
-
-	phandle = of_get_property(np, phandle_name, &size);
-	if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
-		return NULL;
-
-	return of_find_node_by_phandle(be32_to_cpup(phandle + index));
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np:		pointer to a device tree node containing a list
- * @list_name:	property name that contains a list
- * @cells_name:	property name that specifies phandles' arguments count
- * @index:	index of a phandle to parse out
- * @out_args:	optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->node
- * pointer.
- *
- * Example:
- *
- * phandle1: node1 {
- * 	#list-cells = <2>;
- * }
- *
- * phandle2: node2 {
- * 	#list-cells = <1>;
- * }
- *
- * node3 {
- * 	list = <&phandle1 1 2 &phandle2 3>;
- * }
- *
- * To get a device_node of the `node2' node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
 static int __of_parse_phandle_with_args(const struct device_node *np,
 					const char *list_name,
-					const char *cells_name, int index,
+					const char *cells_name,
+					int cell_count, int index,
 					struct of_phandle_args *out_args)
 {
 	const __be32 *list, *list_end;
@@ -1262,19 +1207,32 @@
 		if (phandle) {
 			/*
 			 * Find the provider node and parse the #*-cells
-			 * property to determine the argument length
+			 * property to determine the argument length.
+			 *
+			 * This is not needed if the cell count is hard-coded
+			 * (i.e. cells_name not set, but cell_count is set),
+			 * except when we're going to return the found node
+			 * below.
 			 */
-			node = of_find_node_by_phandle(phandle);
-			if (!node) {
-				pr_err("%s: could not find phandle\n",
-					 np->full_name);
-				goto err;
+			if (cells_name || cur_index == index) {
+				node = of_find_node_by_phandle(phandle);
+				if (!node) {
+					pr_err("%s: could not find phandle\n",
+						np->full_name);
+					goto err;
+				}
 			}
-			if (of_property_read_u32(node, cells_name, &count)) {
-				pr_err("%s: could not get %s for %s\n",
-					 np->full_name, cells_name,
-					 node->full_name);
-				goto err;
+
+			if (cells_name) {
+				if (of_property_read_u32(node, cells_name,
+							 &count)) {
+					pr_err("%s: could not get %s for %s\n",
+						np->full_name, cells_name,
+						node->full_name);
+					goto err;
+				}
+			} else {
+				count = cell_count;
 			}
 
 			/*
@@ -1334,17 +1292,117 @@
 	return rc;
 }
 
+/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ *         the table
+ *
+ * Returns the device_node pointer with refcount incremented.  Use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_parse_phandle(const struct device_node *np,
+				     const char *phandle_name, int index)
+{
+	struct of_phandle_args args;
+
+	if (index < 0)
+		return NULL;
+
+	if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+					 index, &args))
+		return NULL;
+
+	return args.np;
+}
+EXPORT_SYMBOL(of_parse_phandle);
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np:		pointer to a device tree node containing a list
+ * @list_name:	property name that contains a list
+ * @cells_name:	property name that specifies phandles' arguments count
+ * @index:	index of a phandle to parse out
+ * @out_args:	optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * 	#list-cells = <2>;
+ * }
+ *
+ * phandle2: node2 {
+ * 	#list-cells = <1>;
+ * }
+ *
+ * node3 {
+ * 	list = <&phandle1 1 2 &phandle2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
 int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
 				const char *cells_name, int index,
 				struct of_phandle_args *out_args)
 {
 	if (index < 0)
 		return -EINVAL;
-	return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
+	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
+					    index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);
 
 /**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np:		pointer to a device tree node containing a list
+ * @list_name:	property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index:	index of a phandle to parse out
+ * @out_args:	optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * }
+ *
+ * phandle2: node2 {
+ * }
+ *
+ * node3 {
+ * 	list = <&phandle1 0 2 &phandle2 2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+int of_parse_phandle_with_fixed_args(const struct device_node *np,
+				const char *list_name, int cell_count,
+				int index, struct of_phandle_args *out_args)
+{
+	if (index < 0)
+		return -EINVAL;
+	return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+					   index, out_args);
+}
+EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
+
+/**
  * of_count_phandle_with_args() - Find the number of phandles references in a property
  * @np:		pointer to a device tree node containing a list
  * @list_name:	property name that contains a list
@@ -1362,7 +1420,8 @@
 int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
 				const char *cells_name)
 {
-	return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+	return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
+					    NULL);
 }
 EXPORT_SYMBOL(of_count_phandle_with_args);
 
@@ -1734,6 +1793,7 @@
 		ap = dt_alloc(sizeof(*ap) + len + 1, 4);
 		if (!ap)
 			continue;
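+		/* dt_alloc() does not zero the memory for us */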
+		memset(ap, 0, sizeof(*ap) + len + 1);
 		ap->alias = start;
 		of_alias_add(ap, np, id, start, len);
 	}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 4fb06f3..229dd9d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,12 +11,14 @@
 
 #include <linux/kernel.h>
 #include <linux/initrd.h>
+#include <linux/memblock.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
+#include <linux/random.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #ifdef CONFIG_PPC
@@ -125,13 +127,13 @@
 	return score;
 }
 
-static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+static void *unflatten_dt_alloc(void **mem, unsigned long size,
 				       unsigned long align)
 {
 	void *res;
 
-	*mem = ALIGN(*mem, align);
-	res = (void *)*mem;
+	*mem = PTR_ALIGN(*mem, align);
+	res = *mem;
 	*mem += size;
 
 	return res;
@@ -146,9 +148,9 @@
  * @allnextpp: pointer to ->allnext from last allocated device_node
  * @fpsize: Size of the node path up at the current depth.
  */
-static unsigned long unflatten_dt_node(struct boot_param_header *blob,
-				unsigned long mem,
-				unsigned long *p,
+static void *unflatten_dt_node(struct boot_param_header *blob,
+				void *mem,
+				void **p,
 				struct device_node *dad,
 				struct device_node ***allnextpp,
 				unsigned long fpsize)
@@ -161,15 +163,15 @@
 	int has_name = 0;
 	int new_format = 0;
 
-	tag = be32_to_cpup((__be32 *)(*p));
+	tag = be32_to_cpup(*p);
 	if (tag != OF_DT_BEGIN_NODE) {
 		pr_err("Weird tag at start of node: %x\n", tag);
 		return mem;
 	}
 	*p += 4;
-	pathp = (char *)*p;
+	pathp = *p;
 	l = allocl = strlen(pathp) + 1;
-	*p = ALIGN(*p + l, 4);
+	*p = PTR_ALIGN(*p + l, 4);
 
 	/* version 0x10 has a more compact unit name here instead of the full
 	 * path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -201,7 +203,6 @@
 				__alignof__(struct device_node));
 	if (allnextpp) {
 		char *fn;
-		memset(np, 0, sizeof(*np));
 		np->full_name = fn = ((char *)np) + sizeof(*np);
 		if (new_format) {
 			/* rebuild full path for new format */
@@ -239,7 +240,7 @@
 		u32 sz, noff;
 		char *pname;
 
-		tag = be32_to_cpup((__be32 *)(*p));
+		tag = be32_to_cpup(*p);
 		if (tag == OF_DT_NOP) {
 			*p += 4;
 			continue;
@@ -247,11 +248,11 @@
 		if (tag != OF_DT_PROP)
 			break;
 		*p += 4;
-		sz = be32_to_cpup((__be32 *)(*p));
-		noff = be32_to_cpup((__be32 *)((*p) + 4));
+		sz = be32_to_cpup(*p);
+		noff = be32_to_cpup(*p + 4);
 		*p += 8;
 		if (be32_to_cpu(blob->version) < 0x10)
-			*p = ALIGN(*p, sz >= 8 ? 8 : 4);
+			*p = PTR_ALIGN(*p, sz >= 8 ? 8 : 4);
 
 		pname = of_fdt_get_string(blob, noff);
 		if (pname == NULL) {
@@ -281,11 +282,11 @@
 				np->phandle = be32_to_cpup((__be32 *)*p);
 			pp->name = pname;
 			pp->length = sz;
-			pp->value = (void *)*p;
+			pp->value = *p;
 			*prev_pp = pp;
 			prev_pp = &pp->next;
 		}
-		*p = ALIGN((*p) + sz, 4);
+		*p = PTR_ALIGN((*p) + sz, 4);
 	}
 	/* with version 0x10 we may not have the name property, recreate
 	 * it here from the unit name if absent
@@ -334,7 +335,7 @@
 		else
 			mem = unflatten_dt_node(blob, mem, p, np, allnextpp,
 						fpsize);
-		tag = be32_to_cpup((__be32 *)(*p));
+		tag = be32_to_cpup(*p);
 	}
 	if (tag != OF_DT_END_NODE) {
 		pr_err("Weird tag at end of node: %x\n", tag);
@@ -360,7 +361,8 @@
 			     struct device_node **mynodes,
 			     void * (*dt_alloc)(u64 size, u64 align))
 {
-	unsigned long start, mem, size;
+	unsigned long size;
+	void *start, *mem;
 	struct device_node **allnextp = mynodes;
 
 	pr_debug(" -> unflatten_device_tree()\n");
@@ -381,32 +383,28 @@
 	}
 
 	/* First pass, scan for size */
-	start = ((unsigned long)blob) +
-		be32_to_cpu(blob->off_dt_struct);
-	size = unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
-	size = (size | 3) + 1;
+	start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
+	size = (unsigned long)unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
+	size = ALIGN(size, 4);
 
 	pr_debug("  size is %lx, allocating...\n", size);
 
 	/* Allocate memory for the expanded device tree */
-	mem = (unsigned long)
-		dt_alloc(size + 4, __alignof__(struct device_node));
+	mem = dt_alloc(size + 4, __alignof__(struct device_node));
+	memset(mem, 0, size);
 
-	memset((void *)mem, 0, size);
+	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
 
-	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
-
-	pr_debug("  unflattening %lx...\n", mem);
+	pr_debug("  unflattening %p...\n", mem);
 
 	/* Second pass, do actual unflattening */
-	start = ((unsigned long)blob) +
-		be32_to_cpu(blob->off_dt_struct);
+	start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
 	unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
-	if (be32_to_cpup((__be32 *)start) != OF_DT_END)
-		pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
-	if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+	if (be32_to_cpup(start) != OF_DT_END)
+		pr_warning("Weird tag at end of tree: %08x\n", be32_to_cpup(start));
+	if (be32_to_cpup(mem + size) != 0xdeadbeef)
 		pr_warning("End of tree marker overwritten: %08x\n",
-			   be32_to_cpu(((__be32 *)mem)[size / 4]));
+			   be32_to_cpup(mem + size));
 	*allnextp = NULL;
 
 	pr_debug(" <- unflatten_device_tree()\n");
@@ -628,7 +626,8 @@
  */
 void __init early_init_dt_check_for_initrd(unsigned long node)
 {
-	unsigned long start, end, len;
+	u64 start, end;
+	unsigned long len;
 	__be32 *prop;
 
 	pr_debug("Looking for initrd properties... ");
@@ -636,15 +635,16 @@
 	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
 	if (!prop)
 		return;
-	start = of_read_ulong(prop, len/4);
+	start = of_read_number(prop, len/4);
 
 	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
 	if (!prop)
 		return;
-	end = of_read_ulong(prop, len/4);
+	end = of_read_number(prop, len/4);
 
 	early_init_dt_setup_initrd_arch(start, end);
-	pr_debug("initrd_start=0x%lx  initrd_end=0x%lx\n", start, end);
+	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n",
+		 (unsigned long long)start, (unsigned long long)end);
 }
 #else
 inline void early_init_dt_check_for_initrd(unsigned long node)
@@ -774,6 +774,17 @@
 	return 1;
 }
 
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * called from unflatten_device_tree() to bootstrap devicetree itself
+ * Architectures can override this definition if memblock isn't used
+ */
+void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return __va(memblock_alloc(size, align));
+}
+#endif
+
 /**
  * unflatten_device_tree - create tree of device_nodes from flat blob
  *
@@ -792,3 +803,14 @@
 }
 
 #endif /* CONFIG_OF_EARLY_FLATTREE */
+
+/* Feed entire flattened device tree into the random pool */
+static int __init add_fdt_randomness(void)
+{
+	if (initial_boot_params)
+		add_device_randomness(initial_boot_params,
+				be32_to_cpu(initial_boot_params->totalsize));
+
+	return 0;
+}
+core_initcall(add_fdt_randomness);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1264923..1752988 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -28,7 +28,7 @@
 
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
- * @device: Device node of the device whose interrupt is to be mapped
+ * @dev: Device node of the device whose interrupt is to be mapped
  * @index: Index of the interrupt to map
  *
  * This function is a wrapper that chains of_irq_map_one() and
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index ea174c8..8f9be2e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -39,7 +39,7 @@
  * The function gets phy interface string from property 'phy-mode',
  * and return its index in phy_modes table, or errno in error case.
  */
-const int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np)
 {
 	const char *pm;
 	int err, i;
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index a754b84..0fe40c7 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -11,8 +11,6 @@
  * License or (at your option) any later version of the license.
  */
 
-#include <asm/dma-contiguous.h>
-
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/of.h>
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index eeca8a5..9b439ac 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -197,7 +197,7 @@
  * Returns pointer to created platform device, or NULL if a device was not
  * registered.  Unavailable devices will not get registered.
  */
-struct platform_device *of_platform_device_create_pdata(
+static struct platform_device *of_platform_device_create_pdata(
 					struct device_node *np,
 					const char *bus_id,
 					void *platform_data,
@@ -268,8 +268,11 @@
 		return NULL;
 
 	dev = amba_device_alloc(NULL, 0, 0);
-	if (!dev)
+	if (!dev) {
+		pr_err("%s(): amba_device_alloc() failed for %s\n",
+		       __func__, node->full_name);
 		return NULL;
+	}
 
 	/* setup generic device info */
 	dev->dev.coherent_dma_mask = ~0;
@@ -294,12 +297,18 @@
 		dev->irq[i] = irq_of_parse_and_map(node, i);
 
 	ret = of_address_to_resource(node, 0, &dev->res);
-	if (ret)
+	if (ret) {
+		pr_err("%s(): of_address_to_resource() failed (%d) for %s\n",
+		       __func__, ret, node->full_name);
 		goto err_free;
+	}
 
 	ret = amba_device_add(dev, &iomem_resource);
-	if (ret)
+	if (ret) {
+		pr_err("%s(): amba_device_add() failed (%d) for %s\n",
+		       __func__, ret, node->full_name);
 		goto err_free;
+	}
 
 	return dev;
 
@@ -378,6 +387,10 @@
 	}
 
 	if (of_device_is_compatible(bus, "arm,primecell")) {
+		/*
+		 * Don't return an error here to keep compatibility with older
+		 * device tree files.
+		 */
 		of_amba_device_create(bus, bus_id, platform_data, parent);
 		return 0;
 	}
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index f74bfcb..8eea2ef 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -393,17 +393,21 @@
 		complete(&gmux_data->powerchange_done);
 }
 
-static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
+static int gmux_suspend(struct device *dev)
 {
+	struct pnp_dev *pnp = to_pnp_dev(dev);
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
 	gmux_data->resume_client_id = gmux_active_client(gmux_data);
 	gmux_disable_interrupts(gmux_data);
 	return 0;
 }
 
-static int gmux_resume(struct pnp_dev *pnp)
+static int gmux_resume(struct device *dev)
 {
+	struct pnp_dev *pnp = to_pnp_dev(dev);
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
 	gmux_enable_interrupts(gmux_data);
 	gmux_switchto(gmux_data->resume_client_id);
 	if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
@@ -605,13 +609,19 @@
 	{"", 0}
 };
 
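+/* the PNP core now invokes these through the standard dev_pm_ops path */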
+static const struct dev_pm_ops gmux_dev_pm_ops = {
+	.suspend = gmux_suspend,
+	.resume = gmux_resume,
+};
+
 static struct pnp_driver gmux_pnp_driver = {
 	.name		= "apple-gmux",
 	.probe		= gmux_probe,
 	.remove		= gmux_remove,
 	.id_table	= gmux_device_ids,
-	.suspend	= gmux_suspend,
-	.resume		= gmux_resume
+	.driver		= {
+			.pm = &gmux_dev_pm_ops,
+	},
 };
 
 static int __init apple_gmux_init(void)
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 12adb43..a39ee38 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -163,6 +163,13 @@
 	if (!pnp_drv)
 		return 0;
 
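+	/* prefer a dev_pm_ops suspend callback over the legacy pnp hook */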
+	if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
+		error = pnp_drv->driver.pm->suspend(dev);
+		suspend_report_result(pnp_drv->driver.pm->suspend, error);
+		if (error)
+			return error;
+	}
+
 	if (pnp_drv->suspend) {
 		error = pnp_drv->suspend(pnp_dev, state);
 		if (error)
@@ -211,6 +218,12 @@
 			return error;
 	}
 
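+	/* run a dev_pm_ops resume callback before the legacy pnp hook */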
+	if (pnp_drv->driver.pm && pnp_drv->driver.pm->resume) {
+		error = pnp_drv->driver.pm->resume(dev);
+		if (error)
+			return error;
+	}
+
 	if (pnp_drv->resume) {
 		error = pnp_drv->resume(pnp_dev);
 		if (error)
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7b8979c..bb49ab6 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -216,6 +216,13 @@
 	help
 	  Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery
 
+config BATTERY_TWL4030_MADC
+	tristate "TWL4030 MADC battery driver"
+	depends on TWL4030_MADC
+	help
+	  Say Y here to enable this dumb driver for batteries managed
+	  through the TWL4030 MADC.
+
 config CHARGER_88PM860X
 	tristate "Marvell 88PM860x Charger driver"
 	depends on MFD_88PM860X && BATTERY_88PM860X
@@ -334,6 +341,12 @@
 	  You'll need this driver to charge batteries on e.g. Nokia
 	  RX-51/N900.
 
+config CHARGER_BQ24190
+	tristate "TI BQ24190 battery charger driver"
+	depends on I2C && GPIOLIB
+	help
+	  Say Y to enable support for the TI BQ24190 battery charger.
+
 config CHARGER_SMB347
 	tristate "Summit Microelectronics SMB347 Battery Charger"
 	depends on I2C
@@ -357,7 +370,7 @@
 
 config BATTERY_GOLDFISH
 	tristate "Goldfish battery driver"
-	depends on GENERIC_HARDIRQS
+	depends on GENERIC_HARDIRQS && (GOLDFISH || COMPILE_TEST)
 	help
 	  Say Y to enable support for the battery and AC power in the
 	  Goldfish emulator.
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 653bf6c..a4b7417 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_BATTERY_MAX17042)	+= max17042_battery.o
 obj-$(CONFIG_BATTERY_Z2)	+= z2_battery.o
 obj-$(CONFIG_BATTERY_S3C_ADC)	+= s3c_adc_battery.o
+obj-$(CONFIG_BATTERY_TWL4030_MADC)	+= twl4030_madc_battery.o
 obj-$(CONFIG_CHARGER_88PM860X)	+= 88pm860x_charger.o
 obj-$(CONFIG_CHARGER_PCF50633)	+= pcf50633-charger.o
 obj-$(CONFIG_BATTERY_JZ4740)	+= jz4740-battery.o
@@ -50,6 +51,7 @@
 obj-$(CONFIG_CHARGER_MAX8997)	+= max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)	+= max8998_charger.o
 obj-$(CONFIG_CHARGER_BQ2415X)	+= bq2415x_charger.o
+obj-$(CONFIG_CHARGER_BQ24190)	+= bq24190_charger.o
 obj-$(CONFIG_POWER_AVS)		+= avs/
 obj-$(CONFIG_CHARGER_SMB347)	+= smb347-charger.o
 obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index f098fda..a4c4a10 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -774,6 +774,7 @@
 		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
 				di->max_usb_in_curr.usb_type_max);
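+		/* the missing break here fell through into the invalid-link case */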
+		break;
 	case USB_STAT_NOT_VALID_LINK:
 		dev_err(di->dev, "USB Type invalid - try charging anyway\n");
 		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
new file mode 100644
index 0000000..ad3ff8f
--- /dev/null
+++ b/drivers/power/bq24190_charger.c
@@ -0,0 +1,1549 @@
+/*
+ * Driver for the TI bq24190 battery charger.
+ *
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/power/bq24190_charger.h>
+
+
+#define	BQ24190_MANUFACTURER	"Texas Instruments"
+
+#define BQ24190_REG_ISC		0x00 /* Input Source Control */
+#define BQ24190_REG_ISC_EN_HIZ_MASK		BIT(7)
+#define BQ24190_REG_ISC_EN_HIZ_SHIFT		7
+#define BQ24190_REG_ISC_VINDPM_MASK		(BIT(6) | BIT(5) | BIT(4) | \
+						 BIT(3))
+#define BQ24190_REG_ISC_VINDPM_SHIFT		3
+#define BQ24190_REG_ISC_IINLIM_MASK		(BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_ISC_IINLIM_SHIFT		0
+
+#define BQ24190_REG_POC		0x01 /* Power-On Configuration */
+#define BQ24190_REG_POC_RESET_MASK		BIT(7)
+#define BQ24190_REG_POC_RESET_SHIFT		7
+#define BQ24190_REG_POC_WDT_RESET_MASK		BIT(6)
+#define BQ24190_REG_POC_WDT_RESET_SHIFT		6
+#define BQ24190_REG_POC_CHG_CONFIG_MASK		(BIT(5) | BIT(4))
+#define BQ24190_REG_POC_CHG_CONFIG_SHIFT	4
+#define BQ24190_REG_POC_SYS_MIN_MASK		(BIT(3) | BIT(2) | BIT(1))
+#define BQ24190_REG_POC_SYS_MIN_SHIFT		1
+#define BQ24190_REG_POC_BOOST_LIM_MASK		BIT(0)
+#define BQ24190_REG_POC_BOOST_LIM_SHIFT		0
+
+#define BQ24190_REG_CCC		0x02 /* Charge Current Control */
+#define BQ24190_REG_CCC_ICHG_MASK		(BIT(7) | BIT(6) | BIT(5) | \
+						 BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CCC_ICHG_SHIFT		2
+#define BQ24190_REG_CCC_FORCE_20PCT_MASK	BIT(0)
+#define BQ24190_REG_CCC_FORCE_20PCT_SHIFT	0
+
+#define BQ24190_REG_PCTCC	0x03 /* Pre-charge/Termination Current Cntl */
+#define BQ24190_REG_PCTCC_IPRECHG_MASK		(BIT(7) | BIT(6) | BIT(5) | \
+						 BIT(4))
+#define BQ24190_REG_PCTCC_IPRECHG_SHIFT		4
+#define BQ24190_REG_PCTCC_ITERM_MASK		(BIT(3) | BIT(2) | BIT(1) | \
+						 BIT(0))
+#define BQ24190_REG_PCTCC_ITERM_SHIFT		0
+
+#define BQ24190_REG_CVC		0x04 /* Charge Voltage Control */
+#define BQ24190_REG_CVC_VREG_MASK		(BIT(7) | BIT(6) | BIT(5) | \
+						 BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CVC_VREG_SHIFT		2
+#define BQ24190_REG_CVC_BATLOWV_MASK		BIT(1)
+#define BQ24190_REG_CVC_BATLOWV_SHIFT		1
+#define BQ24190_REG_CVC_VRECHG_MASK		BIT(0)
+#define BQ24190_REG_CVC_VRECHG_SHIFT		0
+
+#define BQ24190_REG_CTTC	0x05 /* Charge Term/Timer Control */
+#define BQ24190_REG_CTTC_EN_TERM_MASK		BIT(7)
+#define BQ24190_REG_CTTC_EN_TERM_SHIFT		7
+#define BQ24190_REG_CTTC_TERM_STAT_MASK		BIT(6)
+#define BQ24190_REG_CTTC_TERM_STAT_SHIFT	6
+#define BQ24190_REG_CTTC_WATCHDOG_MASK		(BIT(5) | BIT(4))
+#define BQ24190_REG_CTTC_WATCHDOG_SHIFT		4
+#define BQ24190_REG_CTTC_EN_TIMER_MASK		BIT(3)
+#define BQ24190_REG_CTTC_EN_TIMER_SHIFT		3
+#define BQ24190_REG_CTTC_CHG_TIMER_MASK		(BIT(2) | BIT(1))
+#define BQ24190_REG_CTTC_CHG_TIMER_SHIFT	1
+#define BQ24190_REG_CTTC_JEITA_ISET_MASK	BIT(0)
+#define BQ24190_REG_CTTC_JEITA_ISET_SHIFT	0
+
+#define BQ24190_REG_ICTRC	0x06 /* IR Comp/Thermal Regulation Control */
+#define BQ24190_REG_ICTRC_BAT_COMP_MASK		(BIT(7) | BIT(6) | BIT(5))
+#define BQ24190_REG_ICTRC_BAT_COMP_SHIFT	5
+#define BQ24190_REG_ICTRC_VCLAMP_MASK		(BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_ICTRC_VCLAMP_SHIFT		2
+#define BQ24190_REG_ICTRC_TREG_MASK		(BIT(1) | BIT(0))
+#define BQ24190_REG_ICTRC_TREG_SHIFT		0
+
+#define BQ24190_REG_MOC		0x07 /* Misc. Operation Control */
+#define BQ24190_REG_MOC_DPDM_EN_MASK		BIT(7)
+#define BQ24190_REG_MOC_DPDM_EN_SHIFT		7
+#define BQ24190_REG_MOC_TMR2X_EN_MASK		BIT(6)
+#define BQ24190_REG_MOC_TMR2X_EN_SHIFT		6
+#define BQ24190_REG_MOC_BATFET_DISABLE_MASK	BIT(5)
+#define BQ24190_REG_MOC_BATFET_DISABLE_SHIFT	5
+#define BQ24190_REG_MOC_JEITA_VSET_MASK		BIT(4)
+#define BQ24190_REG_MOC_JEITA_VSET_SHIFT	4
+#define BQ24190_REG_MOC_INT_MASK_MASK		(BIT(1) | BIT(0))
+#define BQ24190_REG_MOC_INT_MASK_SHIFT		0
+
+#define BQ24190_REG_SS		0x08 /* System Status */
+#define BQ24190_REG_SS_VBUS_STAT_MASK		(BIT(7) | BIT(6))
+#define BQ24190_REG_SS_VBUS_STAT_SHIFT		6
+#define BQ24190_REG_SS_CHRG_STAT_MASK		(BIT(5) | BIT(4))
+#define BQ24190_REG_SS_CHRG_STAT_SHIFT		4
+#define BQ24190_REG_SS_DPM_STAT_MASK		BIT(3)
+#define BQ24190_REG_SS_DPM_STAT_SHIFT		3
+#define BQ24190_REG_SS_PG_STAT_MASK		BIT(2)
+#define BQ24190_REG_SS_PG_STAT_SHIFT		2
+#define BQ24190_REG_SS_THERM_STAT_MASK		BIT(1)
+#define BQ24190_REG_SS_THERM_STAT_SHIFT		1
+#define BQ24190_REG_SS_VSYS_STAT_MASK		BIT(0)
+#define BQ24190_REG_SS_VSYS_STAT_SHIFT		0
+
+#define BQ24190_REG_F		0x09 /* Fault */
+#define BQ24190_REG_F_WATCHDOG_FAULT_MASK	BIT(7)
+#define BQ24190_REG_F_WATCHDOG_FAULT_SHIFT	7
+#define BQ24190_REG_F_BOOST_FAULT_MASK		BIT(6)
+#define BQ24190_REG_F_BOOST_FAULT_SHIFT		6
+#define BQ24190_REG_F_CHRG_FAULT_MASK		(BIT(5) | BIT(4))
+#define BQ24190_REG_F_CHRG_FAULT_SHIFT		4
+#define BQ24190_REG_F_BAT_FAULT_MASK		BIT(3)
+#define BQ24190_REG_F_BAT_FAULT_SHIFT		3
+#define BQ24190_REG_F_NTC_FAULT_MASK		(BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_F_NTC_FAULT_SHIFT		0
+
+#define BQ24190_REG_VPRS	0x0A /* Vendor/Part/Revision Status */
+#define BQ24190_REG_VPRS_PN_MASK		(BIT(5) | BIT(4) | BIT(3))
+#define BQ24190_REG_VPRS_PN_SHIFT		3
+#define BQ24190_REG_VPRS_PN_24190			0x4
+#define BQ24190_REG_VPRS_PN_24192			0x5 /* Also 24193 */
+#define BQ24190_REG_VPRS_PN_24192I			0x3
+#define BQ24190_REG_VPRS_TS_PROFILE_MASK	BIT(2)
+#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT	2
+#define BQ24190_REG_VPRS_DEV_REG_MASK		(BIT(1) | BIT(0))
+#define BQ24190_REG_VPRS_DEV_REG_SHIFT		0
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value.  In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine.  Each routine has its own
+ * flag indicating whether it should use the value stored by the last run
+ * of the interrupt handler or do an actual reg read.  That way each routine
+ * can report back whatever fault may have occurred.
+ */
+struct bq24190_dev_info {
+	struct i2c_client		*client;
+	struct device			*dev;
+	struct power_supply		charger;
+	struct power_supply		battery;
+	char				model_name[I2C_NAME_SIZE];
+	kernel_ulong_t			model;
+	unsigned int			gpio_int;
+	unsigned int			irq;
+	struct mutex			f_reg_lock;
+	bool				first_time;
+	bool				charger_health_valid;
+	bool				battery_health_valid;
+	bool				battery_status_valid;
+	u8				f_reg;
+	u8				ss_reg;
+	u8				watchdog;
+};
+
+/*
+ * The tables below provide a 2-way mapping for the value that goes in
+ * the register field and the real-world value that it represents.
+ * The index of the array is the value that goes in the register; the
+ * number at that index in the array is the real-world value that it
+ * represents.
+ */
+/* REG02[7:2] (ICHG) in uAh */
+static const int bq24190_ccc_ichg_values[] = {
+	 512000,  576000,  640000,  704000,  768000,  832000,  896000,  960000,
+	1024000, 1088000, 1152000, 1216000, 1280000, 1344000, 1408000, 1472000,
+	1536000, 1600000, 1664000, 1728000, 1792000, 1856000, 1920000, 1984000,
+	2048000, 2112000, 2176000, 2240000, 2304000, 2368000, 2432000, 2496000,
+	2560000, 2624000, 2688000, 2752000, 2816000, 2880000, 2944000, 3008000,
+	3072000, 3136000, 3200000, 3264000, 3328000, 3392000, 3456000, 3520000,
+	3584000, 3648000, 3712000, 3776000, 3840000, 3904000, 3968000, 4032000,
+	4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
+};
+
+/* REG04[7:2] (VREG) in uV */
+static const int bq24190_cvc_vreg_values[] = {
+	3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
+	3632000, 3648000, 3664000, 3680000, 3696000, 3712000, 3728000, 3744000,
+	3760000, 3776000, 3792000, 3808000, 3824000, 3840000, 3856000, 3872000,
+	3888000, 3904000, 3920000, 3936000, 3952000, 3968000, 3984000, 4000000,
+	4016000, 4032000, 4048000, 4064000, 4080000, 4096000, 4112000, 4128000,
+	4144000, 4160000, 4176000, 4192000, 4208000, 4224000, 4240000, 4256000,
+	4272000, 4288000, 4304000, 4320000, 4336000, 4352000, 4368000, 4384000,
+	4400000
+};
+
+/* REG06[1:0] (TREG) in tenths of degrees Celsius */
+static const int bq24190_ictrc_treg_values[] = {
+	600, 800, 1000, 1200
+};
+
+/*
+ * Return the index in 'tbl' of the greatest value that is less than or
+ * equal to 'val'.  The index range returned is 0 to 'tbl_size' - 1.
+ * Assumes that the values in 'tbl' are sorted from smallest to largest
+ * and 'tbl_size'
+ * is less than 2^8.
+ */
+static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v)
+{
+	int i;
+
+	for (i = 1; i < tbl_size; i++)
+		if (v < tbl[i])
+			break;
+
+	return i - 1;
+}
+
+/* Basic driver I/O routines */
+
+static int bq24190_read(struct bq24190_dev_info *bdi, u8 reg, u8 *data)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(bdi->client, reg);
+	if (ret < 0)
+		return ret;
+
+	*data = ret;
+	return 0;
+}
+
+static int bq24190_write(struct bq24190_dev_info *bdi, u8 reg, u8 data)
+{
+	return i2c_smbus_write_byte_data(bdi->client, reg, data);
+}
+
+static int bq24190_read_mask(struct bq24190_dev_info *bdi, u8 reg,
+		u8 mask, u8 shift, u8 *data)
+{
+	u8 v;
+	int ret;
+
+	ret = bq24190_read(bdi, reg, &v);
+	if (ret < 0)
+		return ret;
+
+	v &= mask;
+	v >>= shift;
+	*data = v;
+
+	return 0;
+}
+
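+/* Read-modify-write of the register bits selected by 'mask'; 'data' is
+ * the field value before shifting.
+ */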
+static int bq24190_write_mask(struct bq24190_dev_info *bdi, u8 reg,
+		u8 mask, u8 shift, u8 data)
+{
+	u8 v;
+	int ret;
+
+	ret = bq24190_read(bdi, reg, &v);
+	if (ret < 0)
+		return ret;
+
+	v &= ~mask;
+	v |= ((data << shift) & mask);
+
+	return bq24190_write(bdi, reg, v);
+}
+
+static int bq24190_get_field_val(struct bq24190_dev_info *bdi,
+		u8 reg, u8 mask, u8 shift,
+		const int tbl[], int tbl_size,
+		int *val)
+{
+	u8 v;
+	int ret;
+
+	ret = bq24190_read_mask(bdi, reg, mask, shift, &v);
+	if (ret < 0)
+		return ret;
+
+	v = (v >= tbl_size) ? (tbl_size - 1) : v;
+	*val = tbl[v];
+
+	return 0;
+}
+
+static int bq24190_set_field_val(struct bq24190_dev_info *bdi,
+		u8 reg, u8 mask, u8 shift,
+		const int tbl[], int tbl_size,
+		int val)
+{
+	u8 idx;
+
+	idx = bq24190_find_idx(tbl, tbl_size, val);
+
+	return bq24190_write_mask(bdi, reg, mask, shift, idx);
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * There are numerous options that are configurable on the bq24190
+ * that go well beyond what the power_supply properties provide access to.
+ * Provide sysfs access to them so they can be examined and possibly modified
+ * on the fly.  They will be provided for the charger power_supply object only
+ * and will be prefixed by 'f_' to make them easier to recognize.
+ */
+
+#define BQ24190_SYSFS_FIELD(_name, r, f, m, store)			\
+{									\
+	.attr	= __ATTR(f_##_name, m, bq24190_sysfs_show, store),	\
+	.reg	= BQ24190_REG_##r,					\
+	.mask	= BQ24190_REG_##r##_##f##_MASK,				\
+	.shift	= BQ24190_REG_##r##_##f##_SHIFT,			\
+}
+
+#define BQ24190_SYSFS_FIELD_RW(_name, r, f)				\
+		BQ24190_SYSFS_FIELD(_name, r, f, S_IWUSR | S_IRUGO,	\
+				bq24190_sysfs_store)
+
+#define BQ24190_SYSFS_FIELD_RO(_name, r, f)				\
+		BQ24190_SYSFS_FIELD(_name, r, f, S_IRUGO, NULL)
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+static ssize_t bq24190_sysfs_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+struct bq24190_sysfs_field_info {
+	struct device_attribute	attr;
+	u8	reg;
+	u8	mask;
+	u8	shift;
+};
+
+/* On i386 ptrace-abi.h defines SS that breaks the macro calls below. */
+#undef SS
+
+static struct bq24190_sysfs_field_info bq24190_sysfs_field_tbl[] = {
+			/*	sysfs name	reg	field in reg */
+	BQ24190_SYSFS_FIELD_RW(en_hiz,		ISC,	EN_HIZ),
+	BQ24190_SYSFS_FIELD_RW(vindpm,		ISC,	VINDPM),
+	BQ24190_SYSFS_FIELD_RW(iinlim,		ISC,	IINLIM),
+	BQ24190_SYSFS_FIELD_RW(chg_config,	POC,	CHG_CONFIG),
+	BQ24190_SYSFS_FIELD_RW(sys_min,		POC,	SYS_MIN),
+	BQ24190_SYSFS_FIELD_RW(boost_lim,	POC,	BOOST_LIM),
+	BQ24190_SYSFS_FIELD_RW(ichg,		CCC,	ICHG),
+	BQ24190_SYSFS_FIELD_RW(force_20_pct,	CCC,	FORCE_20PCT),
+	BQ24190_SYSFS_FIELD_RW(iprechg,		PCTCC,	IPRECHG),
+	BQ24190_SYSFS_FIELD_RW(iterm,		PCTCC,	ITERM),
+	BQ24190_SYSFS_FIELD_RW(vreg,		CVC,	VREG),
+	BQ24190_SYSFS_FIELD_RW(batlowv,		CVC,	BATLOWV),
+	BQ24190_SYSFS_FIELD_RW(vrechg,		CVC,	VRECHG),
+	BQ24190_SYSFS_FIELD_RW(en_term,		CTTC,	EN_TERM),
+	BQ24190_SYSFS_FIELD_RW(term_stat,	CTTC,	TERM_STAT),
+	BQ24190_SYSFS_FIELD_RO(watchdog,	CTTC,	WATCHDOG),
+	BQ24190_SYSFS_FIELD_RW(en_timer,	CTTC,	EN_TIMER),
+	BQ24190_SYSFS_FIELD_RW(chg_timer,	CTTC,	CHG_TIMER),
+	BQ24190_SYSFS_FIELD_RW(jeita_iset,	CTTC,	JEITA_ISET),
+	BQ24190_SYSFS_FIELD_RW(bat_comp,	ICTRC,	BAT_COMP),
+	BQ24190_SYSFS_FIELD_RW(vclamp,		ICTRC,	VCLAMP),
+	BQ24190_SYSFS_FIELD_RW(treg,		ICTRC,	TREG),
+	BQ24190_SYSFS_FIELD_RW(dpdm_en,		MOC,	DPDM_EN),
+	BQ24190_SYSFS_FIELD_RW(tmr2x_en,	MOC,	TMR2X_EN),
+	BQ24190_SYSFS_FIELD_RW(batfet_disable,	MOC,	BATFET_DISABLE),
+	BQ24190_SYSFS_FIELD_RW(jeita_vset,	MOC,	JEITA_VSET),
+	BQ24190_SYSFS_FIELD_RO(int_mask,	MOC,	INT_MASK),
+	BQ24190_SYSFS_FIELD_RO(vbus_stat,	SS,	VBUS_STAT),
+	BQ24190_SYSFS_FIELD_RO(chrg_stat,	SS,	CHRG_STAT),
+	BQ24190_SYSFS_FIELD_RO(dpm_stat,	SS,	DPM_STAT),
+	BQ24190_SYSFS_FIELD_RO(pg_stat,		SS,	PG_STAT),
+	BQ24190_SYSFS_FIELD_RO(therm_stat,	SS,	THERM_STAT),
+	BQ24190_SYSFS_FIELD_RO(vsys_stat,	SS,	VSYS_STAT),
+	BQ24190_SYSFS_FIELD_RO(watchdog_fault,	F,	WATCHDOG_FAULT),
+	BQ24190_SYSFS_FIELD_RO(boost_fault,	F,	BOOST_FAULT),
+	BQ24190_SYSFS_FIELD_RO(chrg_fault,	F,	CHRG_FAULT),
+	BQ24190_SYSFS_FIELD_RO(bat_fault,	F,	BAT_FAULT),
+	BQ24190_SYSFS_FIELD_RO(ntc_fault,	F,	NTC_FAULT),
+	BQ24190_SYSFS_FIELD_RO(pn,		VPRS,	PN),
+	BQ24190_SYSFS_FIELD_RO(ts_profile,	VPRS,	TS_PROFILE),
+	BQ24190_SYSFS_FIELD_RO(dev_reg,		VPRS,	DEV_REG),
+};
+
+static struct attribute *
+	bq24190_sysfs_attrs[ARRAY_SIZE(bq24190_sysfs_field_tbl) + 1];
+
+static const struct attribute_group bq24190_sysfs_attr_group = {
+	.attrs = bq24190_sysfs_attrs,
+};
+
+static void bq24190_sysfs_init_attrs(void)
+{
+	int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+	for (i = 0; i < limit; i++)
+		bq24190_sysfs_attrs[i] = &bq24190_sysfs_field_tbl[i].attr.attr;
+
+	bq24190_sysfs_attrs[limit] = NULL; /* array has an extra slot for this */
+}
+
+static struct bq24190_sysfs_field_info *bq24190_sysfs_field_lookup(
+		const char *name)
+{
+	int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+	for (i = 0; i < limit; i++)
+		if (!strcmp(name, bq24190_sysfs_field_tbl[i].attr.attr.name))
+			break;
+
+	if (i >= limit)
+		return NULL;
+
+	return &bq24190_sysfs_field_tbl[i];
+}
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct bq24190_dev_info *bdi =
+			container_of(psy, struct bq24190_dev_info, charger);
+	struct bq24190_sysfs_field_info *info;
+	int ret;
+	u8 v;
+
+	info = bq24190_sysfs_field_lookup(attr->attr.name);
+	if (!info)
+		return -EINVAL;
+
+	ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "%hhx\n", v);
+}
+
+static ssize_t bq24190_sysfs_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct bq24190_dev_info *bdi =
+			container_of(psy, struct bq24190_dev_info, charger);
+	struct bq24190_sysfs_field_info *info;
+	int ret;
+	u8 v;
+
+	info = bq24190_sysfs_field_lookup(attr->attr.name);
+	if (!info)
+		return -EINVAL;
+
+	ret = kstrtou8(buf, 0, &v);
+	if (ret < 0)
+		return ret;
+
+	ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+	bq24190_sysfs_init_attrs();
+
+	return sysfs_create_group(&bdi->charger.dev->kobj,
+			&bq24190_sysfs_attr_group);
+}
+
+static void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi)
+{
+	sysfs_remove_group(&bdi->charger.dev->kobj, &bq24190_sysfs_attr_group);
+}
+#else
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+	return 0;
+}
+
+static inline void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi) {}
+#endif
+
+/*
+ * According to the "Host Mode and default Mode" section of the
+ * manual, a write to any register causes the bq24190 to switch
+ * from default mode to host mode.  It will switch back to default
+ * mode after a WDT timeout unless the WDT is turned off as well.
+ * So, by simply turning off the WDT, we accomplish both with the
+ * same write.
+ */
+static int bq24190_set_mode_host(struct bq24190_dev_info *bdi)
+{
+	int ret;
+	u8 v;
+
+	ret = bq24190_read(bdi, BQ24190_REG_CTTC, &v);
+	if (ret < 0)
+		return ret;
+
+	bdi->watchdog = ((v & BQ24190_REG_CTTC_WATCHDOG_MASK) >>
+					BQ24190_REG_CTTC_WATCHDOG_SHIFT);
+	v &= ~BQ24190_REG_CTTC_WATCHDOG_MASK;
+
+	return bq24190_write(bdi, BQ24190_REG_CTTC, v);
+}
+
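+/* Reset every register to its power-on default, polling until the
+ * self-clearing reset bit drops.
+ */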
+static int bq24190_register_reset(struct bq24190_dev_info *bdi)
+{
+	int ret, limit = 100;
+	u8 v;
+
+	/* Reset the registers */
+	ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+			BQ24190_REG_POC_RESET_MASK,
+			BQ24190_REG_POC_RESET_SHIFT,
+			0x1);
+	if (ret < 0)
+		return ret;
+
+	/* Reset bit will be cleared by hardware so poll until it is */
+	do {
+		ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+				BQ24190_REG_POC_RESET_MASK,
+				BQ24190_REG_POC_RESET_SHIFT,
+				&v);
+		if (ret < 0)
+			return ret;
+
+		if (!v)
+			break;
+
+		udelay(10);
+	} while (--limit);
+
+	if (!limit)
+		return -EIO;
+
+	return 0;
+}
+
+/* Charger power supply property routines */
+
+static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 v;
+	int type, ret;
+
+	ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+			BQ24190_REG_POC_CHG_CONFIG_MASK,
+			BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+			&v);
+	if (ret < 0)
+		return ret;
+
+	/* If POC[CHG_CONFIG] (REG01[5:4]) == 0, charge is disabled */
+	if (!v) {
+		type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+	} else {
+		ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+				BQ24190_REG_CCC_FORCE_20PCT_MASK,
+				BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+				&v);
+		if (ret < 0)
+			return ret;
+
+		type = (v) ? POWER_SUPPLY_CHARGE_TYPE_TRICKLE :
+			     POWER_SUPPLY_CHARGE_TYPE_FAST;
+	}
+
+	val->intval = type;
+
+	return 0;
+}
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+		const union power_supply_propval *val)
+{
+	u8 chg_config, force_20pct, en_term;
+	int ret;
+
+	/*
+	 * According to the "Termination when REG02[0] = 1" section of
+	 * the bq24190 manual, the trickle charge could be less than the
+	 * termination current so it recommends turning off the termination
+	 * function.
+	 *
+	 * Note: AFAICT from the datasheet, the user will have to manually
+	 * turn off the charging when in 20% mode.  If it's not turned off,
+	 * there could be battery damage.  So, use this mode at your own risk.
+	 */
+	switch (val->intval) {
+	case POWER_SUPPLY_CHARGE_TYPE_NONE:
+		chg_config = 0x0;
+		break;
+	case POWER_SUPPLY_CHARGE_TYPE_TRICKLE:
+		chg_config = 0x1;
+		force_20pct = 0x1;
+		en_term = 0x0;
+		break;
+	case POWER_SUPPLY_CHARGE_TYPE_FAST:
+		chg_config = 0x1;
+		force_20pct = 0x0;
+		en_term = 0x1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (chg_config) { /* Enabling the charger */
+		ret = bq24190_write_mask(bdi, BQ24190_REG_CCC,
+				BQ24190_REG_CCC_FORCE_20PCT_MASK,
+				BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+				force_20pct);
+		if (ret < 0)
+			return ret;
+
+		ret = bq24190_write_mask(bdi, BQ24190_REG_CTTC,
+				BQ24190_REG_CTTC_EN_TERM_MASK,
+				BQ24190_REG_CTTC_EN_TERM_SHIFT,
+				en_term);
+		if (ret < 0)
+			return ret;
+	}
+
+	return bq24190_write_mask(bdi, BQ24190_REG_POC,
+			BQ24190_REG_POC_CHG_CONFIG_MASK,
+			BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+}
+
+static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 v;
+	int health, ret;
+
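+	/*
+	 * Prefer the F register snapshot cached by the IRQ handler: the
+	 * fault that raised the interrupt may already have cleared, so a
+	 * fresh read could miss it.
+	 */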
+	mutex_lock(&bdi->f_reg_lock);
+
+	if (bdi->charger_health_valid) {
+		v = bdi->f_reg;
+		bdi->charger_health_valid = false;
+		mutex_unlock(&bdi->f_reg_lock);
+	} else {
+		mutex_unlock(&bdi->f_reg_lock);
+
+		ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
+		/*
+		 * This could be over-current or over-voltage but there's
+		 * no way to tell which.  Return 'OVERVOLTAGE' since there
+		 * isn't an 'OVERCURRENT' value defined that we can return
+		 * even if it was over-current.
+		 */
+		health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	} else {
+		v &= BQ24190_REG_F_CHRG_FAULT_MASK;
+		v >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+		switch (v) {
+		case 0x0: /* Normal */
+			health = POWER_SUPPLY_HEALTH_GOOD;
+			break;
+		case 0x1: /* Input Fault (VBUS OVP or VBAT<VBUS<3.8V) */
+			/*
+			 * This could be over-voltage or under-voltage
+			 * and there's no way to tell which.  Instead
+			 * of looking foolish and returning 'OVERVOLTAGE'
+			 * when it's really under-voltage, just return
+			 * 'UNSPEC_FAILURE'.
+			 */
+			health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			break;
+		case 0x2: /* Thermal Shutdown */
+			health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			break;
+		case 0x3: /* Charge Safety Timer Expiration */
+			health = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+			break;
+		default:
+			health = POWER_SUPPLY_HEALTH_UNKNOWN;
+		}
+	}
+
+	val->intval = health;
+
+	return 0;
+}
+
+static int bq24190_charger_get_online(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 v;
+	int ret;
+
+	ret = bq24190_read_mask(bdi, BQ24190_REG_SS,
+			BQ24190_REG_SS_PG_STAT_MASK,
+			BQ24190_REG_SS_PG_STAT_SHIFT, &v);
+	if (ret < 0)
+		return ret;
+
+	val->intval = v;
+	return 0;
+}
+
+static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 v;
+	int curr, ret;
+
+	ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
+			BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+			bq24190_ccc_ichg_values,
+			ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+	if (ret < 0)
+		return ret;
+
+	ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+			BQ24190_REG_CCC_FORCE_20PCT_MASK,
+			BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+	if (ret < 0)
+		return ret;
+
+	/* If FORCE_20PCT is enabled, then current is 20% of ICHG value */
+	if (v)
+		curr /= 5;
+
+	val->intval = curr;
+	return 0;
+}
+
+static int bq24190_charger_get_current_max(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	int idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+
+	val->intval = bq24190_ccc_ichg_values[idx];
+	return 0;
+}
+
+static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
+		const union power_supply_propval *val)
+{
+	u8 v;
+	int ret, curr = val->intval;
+
+	ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+			BQ24190_REG_CCC_FORCE_20PCT_MASK,
+			BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+	if (ret < 0)
+		return ret;
+
+	/* If FORCE_20PCT is enabled, have to multiply value passed in by 5 */
+	if (v)
+		curr *= 5;
+
+	return bq24190_set_field_val(bdi, BQ24190_REG_CCC,
+			BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+			bq24190_ccc_ichg_values,
+			ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+}
+
+static int bq24190_charger_get_voltage(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	int voltage, ret;
+
+	ret = bq24190_get_field_val(bdi, BQ24190_REG_CVC,
+			BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+			bq24190_cvc_vreg_values,
+			ARRAY_SIZE(bq24190_cvc_vreg_values), &voltage);
+	if (ret < 0)
+		return ret;
+
+	val->intval = voltage;
+	return 0;
+}
+
+static int bq24190_charger_get_voltage_max(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	int idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
+
+	val->intval = bq24190_cvc_vreg_values[idx];
+	return 0;
+}
+
+static int bq24190_charger_set_voltage(struct bq24190_dev_info *bdi,
+		const union power_supply_propval *val)
+{
+	return bq24190_set_field_val(bdi, BQ24190_REG_CVC,
+			BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+			bq24190_cvc_vreg_values,
+			ARRAY_SIZE(bq24190_cvc_vreg_values), val->intval);
+}
+
+static int bq24190_charger_get_property(struct power_supply *psy,
+		enum power_supply_property psp, union power_supply_propval *val)
+{
+	struct bq24190_dev_info *bdi =
+			container_of(psy, struct bq24190_dev_info, charger);
+	int ret;
+
+	dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+	pm_runtime_get_sync(bdi->dev);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		ret = bq24190_charger_get_charge_type(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		ret = bq24190_charger_get_health(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		ret = bq24190_charger_get_online(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+		ret = bq24190_charger_get_current(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		ret = bq24190_charger_get_current_max(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		ret = bq24190_charger_get_voltage(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+		ret = bq24190_charger_get_voltage_max(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_SCOPE:
+		val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+		ret = 0;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = bdi->model_name;
+		ret = 0;
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURER:
+		val->strval = BQ24190_MANUFACTURER;
+		ret = 0;
+		break;
+	default:
+		ret = -ENODATA;
+	}
+
+	pm_runtime_put_sync(bdi->dev);
+	return ret;
+}
+
+static int bq24190_charger_set_property(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct bq24190_dev_info *bdi =
+			container_of(psy, struct bq24190_dev_info, charger);
+	int ret;
+
+	dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+	pm_runtime_get_sync(bdi->dev);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		ret = bq24190_charger_set_charge_type(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+		ret = bq24190_charger_set_current(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		ret = bq24190_charger_set_voltage(bdi, val);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	pm_runtime_put_sync(bdi->dev);
+	return ret;
+}
+
+static int bq24190_charger_property_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	int ret;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		ret = 1;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static enum power_supply_property bq24190_charger_properties[] = {
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_SCOPE,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static char *bq24190_charger_supplied_to[] = {
+	"main-battery",
+};
+
+static void bq24190_charger_init(struct power_supply *charger)
+{
+	charger->name = "bq24190-charger";
+	charger->type = POWER_SUPPLY_TYPE_USB;
+	charger->properties = bq24190_charger_properties;
+	charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+	charger->supplied_to = bq24190_charger_supplied_to;
+	charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
+	charger->get_property = bq24190_charger_get_property;
+	charger->set_property = bq24190_charger_set_property;
+	charger->property_is_writeable = bq24190_charger_property_is_writeable;
+}
+
+/* Battery power supply property routines */
+
+static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 ss_reg, chrg_fault;
+	int status, ret;
+
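+	/* As in bq24190_charger_get_health(), prefer the cached F register */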
+	mutex_lock(&bdi->f_reg_lock);
+
+	if (bdi->battery_status_valid) {
+		chrg_fault = bdi->f_reg;
+		bdi->battery_status_valid = false;
+		mutex_unlock(&bdi->f_reg_lock);
+	} else {
+		mutex_unlock(&bdi->f_reg_lock);
+
+		ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
+		if (ret < 0)
+			return ret;
+	}
+
+	chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
+	chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+	ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The battery must be discharging when any of these are true:
+	 * - there is no good power source;
+	 * - there is a charge fault.
+	 * Could also be discharging when in "supplement mode" but
+	 * there is no way to tell when it's in that mode.
+	 */
+	if (!(ss_reg & BQ24190_REG_SS_PG_STAT_MASK) || chrg_fault) {
+		status = POWER_SUPPLY_STATUS_DISCHARGING;
+	} else {
+		ss_reg &= BQ24190_REG_SS_CHRG_STAT_MASK;
+		ss_reg >>= BQ24190_REG_SS_CHRG_STAT_SHIFT;
+
+		switch (ss_reg) {
+		case 0x0: /* Not Charging */
+			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+			break;
+		case 0x1: /* Pre-charge */
+		case 0x2: /* Fast Charging */
+			status = POWER_SUPPLY_STATUS_CHARGING;
+			break;
+		case 0x3: /* Charge Termination Done */
+			status = POWER_SUPPLY_STATUS_FULL;
+			break;
+		default:
+			ret = -EIO;
+		}
+	}
+
+	if (!ret)
+		val->intval = status;
+
+	return ret;
+}
+
+static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 v;
+	int health, ret;
+
+	mutex_lock(&bdi->f_reg_lock);
+
+	if (bdi->battery_health_valid) {
+		v = bdi->f_reg;
+		bdi->battery_health_valid = false;
+		mutex_unlock(&bdi->f_reg_lock);
+	} else {
+		mutex_unlock(&bdi->f_reg_lock);
+
+		ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
+		health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	} else {
+		v &= BQ24190_REG_F_NTC_FAULT_MASK;
+		v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+
+		switch (v) {
+		case 0x0: /* Normal */
+			health = POWER_SUPPLY_HEALTH_GOOD;
+			break;
+		case 0x1: /* TS1 Cold */
+		case 0x3: /* TS2 Cold */
+		case 0x5: /* Both Cold */
+			health = POWER_SUPPLY_HEALTH_COLD;
+			break;
+		case 0x2: /* TS1 Hot */
+		case 0x4: /* TS2 Hot */
+		case 0x6: /* Both Hot */
+			health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			break;
+		default:
+			health = POWER_SUPPLY_HEALTH_UNKNOWN;
+		}
+	}
+
+	val->intval = health;
+	return 0;
+}
+
+static int bq24190_battery_get_online(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	u8 batfet_disable;
+	int ret;
+
+	ret = bq24190_read_mask(bdi, BQ24190_REG_MOC,
+			BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+			BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, &batfet_disable);
+	if (ret < 0)
+		return ret;
+
+	val->intval = !batfet_disable;
+	return 0;
+}
+
+static int bq24190_battery_set_online(struct bq24190_dev_info *bdi,
+		const union power_supply_propval *val)
+{
+	return bq24190_write_mask(bdi, BQ24190_REG_MOC,
+			BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+			BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, !val->intval);
+}
+
+static int bq24190_battery_get_temp_alert_max(struct bq24190_dev_info *bdi,
+		union power_supply_propval *val)
+{
+	int temp, ret;
+
+	ret = bq24190_get_field_val(bdi, BQ24190_REG_ICTRC,
+			BQ24190_REG_ICTRC_TREG_MASK,
+			BQ24190_REG_ICTRC_TREG_SHIFT,
+			bq24190_ictrc_treg_values,
+			ARRAY_SIZE(bq24190_ictrc_treg_values), &temp);
+	if (ret < 0)
+		return ret;
+
+	val->intval = temp;
+	return 0;
+}
+
+static int bq24190_battery_set_temp_alert_max(struct bq24190_dev_info *bdi,
+		const union power_supply_propval *val)
+{
+	return bq24190_set_field_val(bdi, BQ24190_REG_ICTRC,
+			BQ24190_REG_ICTRC_TREG_MASK,
+			BQ24190_REG_ICTRC_TREG_SHIFT,
+			bq24190_ictrc_treg_values,
+			ARRAY_SIZE(bq24190_ictrc_treg_values), val->intval);
+}
+
+static int bq24190_battery_get_property(struct power_supply *psy,
+		enum power_supply_property psp, union power_supply_propval *val)
+{
+	struct bq24190_dev_info *bdi =
+			container_of(psy, struct bq24190_dev_info, battery);
+	int ret;
+
+	dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+	pm_runtime_get_sync(bdi->dev);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		ret = bq24190_battery_get_status(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		ret = bq24190_battery_get_health(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		ret = bq24190_battery_get_online(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		/* Could be Li-ion or Li-polymer but no way to tell which */
+		val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+		ret = 0;
+		break;
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+		ret = bq24190_battery_get_temp_alert_max(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_SCOPE:
+		val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+		ret = 0;
+		break;
+	default:
+		ret = -ENODATA;
+	}
+
+	pm_runtime_put_sync(bdi->dev);
+	return ret;
+}
+
+static int bq24190_battery_set_property(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct bq24190_dev_info *bdi =
+			container_of(psy, struct bq24190_dev_info, battery);
+	int ret;
+
+	dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+	pm_runtime_get_sync(bdi->dev);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		ret = bq24190_battery_set_online(bdi, val);
+		break;
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+		ret = bq24190_battery_set_temp_alert_max(bdi, val);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	pm_runtime_put_sync(bdi->dev);
+	return ret;
+}
+
+static int bq24190_battery_property_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	int ret;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+		ret = 1;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static enum power_supply_property bq24190_battery_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+	POWER_SUPPLY_PROP_SCOPE,
+};
+
+static void bq24190_battery_init(struct power_supply *battery)
+{
+	battery->name = "bq24190-battery";
+	battery->type = POWER_SUPPLY_TYPE_BATTERY;
+	battery->properties = bq24190_battery_properties;
+	battery->num_properties = ARRAY_SIZE(bq24190_battery_properties);
+	battery->get_property = bq24190_battery_get_property;
+	battery->set_property = bq24190_battery_set_property;
+	battery->property_is_writeable = bq24190_battery_property_is_writeable;
+}
+
+static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
+{
+	struct bq24190_dev_info *bdi = data;
+	bool alert_userspace = false;
+	u8 ss_reg = 0, f_reg = 0;
+	int ret;
+
+	pm_runtime_get_sync(bdi->dev);
+
+	ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+	if (ret < 0) {
+		dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
+		goto out;
+	}
+
+	if (ss_reg != bdi->ss_reg) {
+		/*
+		 * The device is in host mode so when PG_STAT goes from 1->0
+		 * (i.e., power removed) HIZ needs to be disabled.
+		 */
+		if ((bdi->ss_reg & BQ24190_REG_SS_PG_STAT_MASK) &&
+				!(ss_reg & BQ24190_REG_SS_PG_STAT_MASK)) {
+			ret = bq24190_write_mask(bdi, BQ24190_REG_ISC,
+					BQ24190_REG_ISC_EN_HIZ_MASK,
+					BQ24190_REG_ISC_EN_HIZ_SHIFT,
+					0);
+			if (ret < 0)
+				dev_err(bdi->dev, "Can't access ISC reg: %d\n",
+					ret);
+		}
+
+		bdi->ss_reg = ss_reg;
+		alert_userspace = true;
+	}
+
+	mutex_lock(&bdi->f_reg_lock);
+
+	ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+	if (ret < 0) {
+		mutex_unlock(&bdi->f_reg_lock);
+		dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+		goto out;
+	}
+
+	if (f_reg != bdi->f_reg) {
+		bdi->f_reg = f_reg;
+		bdi->charger_health_valid = true;
+		bdi->battery_health_valid = true;
+		bdi->battery_status_valid = true;
+
+		alert_userspace = true;
+	}
+
+	mutex_unlock(&bdi->f_reg_lock);
+
+	/*
+	 * Sometimes bq24190 gives a steady trickle of interrupts even
+	 * though the watchdog timer is turned off and neither the STATUS
+	 * nor FAULT registers have changed.  Weed out these spurious
+	 * interrupts so userspace isn't alerted for no reason.
+	 * In addition, the chip always generates an interrupt after
+	 * register reset so we should ignore that one (the very first
+	 * interrupt received).
+	 */
+	if (alert_userspace) {
+		if (!bdi->first_time) {
+			power_supply_changed(&bdi->charger);
+			power_supply_changed(&bdi->battery);
+		} else {
+			/* Swallow the post-reset interrupt; alert from now on */
+			bdi->first_time = false;
+		}
+	}
+
+out:
+	pm_runtime_put_sync(bdi->dev);
+
+	dev_dbg(bdi->dev, "ss_reg: 0x%02x, f_reg: 0x%02x\n", ss_reg, f_reg);
+
+	return IRQ_HANDLED;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+	u8 v;
+	int ret;
+
+	pm_runtime_get_sync(bdi->dev);
+
+	/* First check that the device really is what it's supposed to be */
+	ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+			BQ24190_REG_VPRS_PN_MASK,
+			BQ24190_REG_VPRS_PN_SHIFT,
+			&v);
+	if (ret < 0)
+		goto out;
+
+	if (v != bdi->model) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = bq24190_register_reset(bdi);
+	if (ret < 0)
+		goto out;
+
+	ret = bq24190_set_mode_host(bdi);
+out:
+	pm_runtime_put_sync(bdi->dev);
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+	bdi->irq = irq_of_parse_and_map(bdi->dev->of_node, 0);
+	if (bdi->irq <= 0)
+		return -1;
+
+	return 0;
+}
+#else
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+	return -1;
+}
+#endif
+
+static int bq24190_setup_pdata(struct bq24190_dev_info *bdi,
+		struct bq24190_platform_data *pdata)
+{
+	int ret;
+
+	if (!gpio_is_valid(pdata->gpio_int))
+		return -1;
+
+	ret = gpio_request(pdata->gpio_int, dev_name(bdi->dev));
+	if (ret < 0)
+		return -1;
+
+	ret = gpio_direction_input(pdata->gpio_int);
+	if (ret < 0)
+		goto out;
+
+	bdi->irq = gpio_to_irq(pdata->gpio_int);
+	if (!bdi->irq)
+		goto out;
+
+	bdi->gpio_int = pdata->gpio_int;
+	return 0;
+
+out:
+	gpio_free(pdata->gpio_int);
+	return -1;
+}
+
+static int bq24190_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+	struct device *dev = &client->dev;
+	struct bq24190_platform_data *pdata = client->dev.platform_data;
+	struct bq24190_dev_info *bdi;
+	int ret;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
+		return -ENODEV;
+	}
+
+	bdi = devm_kzalloc(dev, sizeof(*bdi), GFP_KERNEL);
+	if (!bdi) {
+		dev_err(dev, "Can't alloc bdi struct\n");
+		return -ENOMEM;
+	}
+
+	bdi->client = client;
+	bdi->dev = dev;
+	bdi->model = id->driver_data;
+	strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
+	mutex_init(&bdi->f_reg_lock);
+	bdi->first_time = true;
+	bdi->charger_health_valid = false;
+	bdi->battery_health_valid = false;
+	bdi->battery_status_valid = false;
+
+	i2c_set_clientdata(client, bdi);
+
+	if (dev->of_node)
+		ret = bq24190_setup_dt(bdi);
+	else
+		ret = bq24190_setup_pdata(bdi, pdata);
+
+	if (ret) {
+		dev_err(dev, "Can't get irq info\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+			bq24190_irq_handler_thread,
+			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+			"bq24190-charger", bdi);
+	if (ret < 0) {
+		dev_err(dev, "Can't set up irq handler\n");
+		goto out1;
+	}
+
+	pm_runtime_enable(dev);
+	pm_runtime_resume(dev);
+
+	ret = bq24190_hw_init(bdi);
+	if (ret < 0) {
+		dev_err(dev, "Hardware init failed\n");
+		goto out2;
+	}
+
+	bq24190_charger_init(&bdi->charger);
+
+	ret = power_supply_register(dev, &bdi->charger);
+	if (ret) {
+		dev_err(dev, "Can't register charger\n");
+		goto out2;
+	}
+
+	bq24190_battery_init(&bdi->battery);
+
+	ret = power_supply_register(dev, &bdi->battery);
+	if (ret) {
+		dev_err(dev, "Can't register battery\n");
+		goto out3;
+	}
+
+	ret = bq24190_sysfs_create_group(bdi);
+	if (ret) {
+		dev_err(dev, "Can't create sysfs entries\n");
+		goto out4;
+	}
+
+	return 0;
+
+out4:
+	power_supply_unregister(&bdi->battery);
+out3:
+	power_supply_unregister(&bdi->charger);
+out2:
+	pm_runtime_disable(dev);
+out1:
+	if (bdi->gpio_int)
+		gpio_free(bdi->gpio_int);
+
+	return ret;
+}
+
+static int bq24190_remove(struct i2c_client *client)
+{
+	struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+	pm_runtime_get_sync(bdi->dev);
+	bq24190_register_reset(bdi);
+	pm_runtime_put_sync(bdi->dev);
+
+	bq24190_sysfs_remove_group(bdi);
+	power_supply_unregister(&bdi->battery);
+	power_supply_unregister(&bdi->charger);
+	pm_runtime_disable(bdi->dev);
+
+	if (bdi->gpio_int)
+		gpio_free(bdi->gpio_int);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bq24190_pm_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+	pm_runtime_get_sync(bdi->dev);
+	bq24190_register_reset(bdi);
+	pm_runtime_put_sync(bdi->dev);
+
+	return 0;
+}
+
+static int bq24190_pm_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+	bdi->charger_health_valid = false;
+	bdi->battery_health_valid = false;
+	bdi->battery_status_valid = false;
+
+	pm_runtime_get_sync(bdi->dev);
+	bq24190_register_reset(bdi);
+	pm_runtime_put_sync(bdi->dev);
+
+	/* Things may have changed while suspended so alert upper layer */
+	power_supply_changed(&bdi->charger);
+	power_supply_changed(&bdi->battery);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bq24190_pm_ops, bq24190_pm_suspend, bq24190_pm_resume);
+
+/*
+ * Only support the bq24190 right now.  The bq24192, bq24192i, and bq24193
+ * are similar but not identical so the driver needs to be extended to
+ * support them.
+ */
+static const struct i2c_device_id bq24190_i2c_ids[] = {
+	{ "bq24190", BQ24190_REG_VPRS_PN_24190 },
+	{ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id bq24190_of_match[] = {
+	{ .compatible = "ti,bq24190", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bq24190_of_match);
+#else
+static const struct of_device_id bq24190_of_match[] = {
+	{ },
+};
+#endif
+
+static struct i2c_driver bq24190_driver = {
+	.probe		= bq24190_probe,
+	.remove		= bq24190_remove,
+	.id_table	= bq24190_i2c_ids,
+	.driver = {
+		.name		= "bq24190-charger",
+		.owner		= THIS_MODULE,
+		.pm		= &bq24190_pm_ops,
+		.of_match_table	= of_match_ptr(bq24190_of_match),
+	},
+};
+module_i2c_driver(bq24190_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_ALIAS("i2c:bq24190-charger");
+MODULE_DESCRIPTION("TI BQ24190 Charger Driver");
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index c58d0e3..d02ae02 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -287,7 +287,7 @@
 };
 
 #ifdef CONFIG_PM
-static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
+static int collie_bat_suspend(struct ucb1x00_dev *dev)
 {
 	/* flush all pending status updates */
 	flush_work(&bat_work);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 0ee1e14..b4513f2 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -458,6 +458,7 @@
 	of_property_read_u32(np, "fast-charge", &fast_charge);
 	of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
 	of_property_read_u32(np, "no-temp-support", &no_temp_support);
+	of_node_put(np);
 
 	pdata->batt_detect = batt_detect;
 	pdata->fast_charge = fast_charge;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 3b2d5df..00e6672 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -67,23 +67,42 @@
 
 static void power_supply_changed_work(struct work_struct *work)
 {
+	unsigned long flags;
 	struct power_supply *psy = container_of(work, struct power_supply,
 						changed_work);
 
 	dev_dbg(psy->dev, "%s\n", __func__);
 
-	class_for_each_device(power_supply_class, NULL, psy,
-			      __power_supply_changed_work);
-
-	power_supply_update_leds(psy);
-
-	kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+	spin_lock_irqsave(&psy->changed_lock, flags);
+	if (psy->changed) {
+		psy->changed = false;
+		spin_unlock_irqrestore(&psy->changed_lock, flags);
+		class_for_each_device(power_supply_class, NULL, psy,
+				      __power_supply_changed_work);
+		power_supply_update_leds(psy);
+		kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+		spin_lock_irqsave(&psy->changed_lock, flags);
+	}
+	/*
+	 * Dependent power supplies (e.g. battery) may have changed state
+	 * as a result of this event, so poll again and hold the
+	 * wakeup_source until all events are processed.
+	 */
+	if (!psy->changed)
+		pm_relax(psy->dev);
+	spin_unlock_irqrestore(&psy->changed_lock, flags);
 }
 
 void power_supply_changed(struct power_supply *psy)
 {
+	unsigned long flags;
+
 	dev_dbg(psy->dev, "%s\n", __func__);
 
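+	/*
+	 * Hold a wakeup_source so a suspend begun between here and the
+	 * work running can't swallow the event; power_supply_changed_work()
+	 * calls pm_relax() once psy->changed is clear again.
+	 */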
+	spin_lock_irqsave(&psy->changed_lock, flags);
+	psy->changed = true;
+	pm_stay_awake(psy->dev);
+	spin_unlock_irqrestore(&psy->changed_lock, flags);
 	schedule_work(&psy->changed_work);
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -500,6 +519,11 @@
 		goto check_supplies_failed;
 	}
 
+	spin_lock_init(&psy->changed_lock);
+	rc = device_init_wakeup(dev, true);
+	if (rc)
+		goto wakeup_init_failed;
+
 	rc = kobject_set_name(&dev->kobj, "%s", psy->name);
 	if (rc)
 		goto kobject_set_name_failed;
@@ -529,6 +553,7 @@
 register_cooler_failed:
 	psy_unregister_thermal(psy);
 register_thermal_failed:
+wakeup_init_failed:
 	device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
@@ -546,6 +571,7 @@
 	power_supply_remove_triggers(psy);
 	psy_unregister_cooler(psy);
 	psy_unregister_thermal(psy);
+	device_init_wakeup(psy->dev, false);
 	device_unregister(psy->dev);
 }
 EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 29178f7..44420d1 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -118,7 +118,7 @@
 	long long_val;
 
 	/* TODO: support other types than int */
-	ret = strict_strtol(buf, 10, &long_val);
+	ret = kstrtol(buf, 10, &long_val);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ee039dc..9b3ea53 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -14,6 +14,12 @@
 	  If your board needs a GPIO high/low to power down, say Y and
 	  create a binding in your devicetree.
 
+config POWER_RESET_MSM
+	bool "Qualcomm MSM power-off driver"
+	depends on POWER_RESET && ARCH_MSM
+	help
+	  Power off and restart support for Qualcomm boards.
+
 config POWER_RESET_QNAP
 	bool "QNAP power-off driver"
 	depends on OF_GPIO && POWER_RESET && PLAT_ORION
@@ -34,7 +40,14 @@
 config POWER_RESET_VEXPRESS
 	bool "ARM Versatile Express power-off and reset driver"
 	depends on ARM || ARM64
-	depends on POWER_RESET
+	depends on POWER_RESET && VEXPRESS_CONFIG
 	help
 	  Power off and reset support for the ARM Ltd. Versatile
 	  Express boards.
+
+config POWER_RESET_XGENE
+	bool "APM SoC X-Gene reset driver"
+	depends on ARM64
+	depends on POWER_RESET
+	help
+	  Reboot support for the APM SoC X-Gene Eval boards.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 372807f..3e6ed88 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,4 +1,6 @@
 obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
 obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
 obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
new file mode 100644
index 0000000..774f9a3
--- /dev/null
+++ b/drivers/power/reset/msm-poweroff.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+static void __iomem *msm_ps_hold;
+
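+/*
+ * Dropping PS_HOLD signals the PMIC (or equivalent power controller)
+ * to reset the SoC; spin for up to 10s in case that takes a while.
+ */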
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+	writel(0, msm_ps_hold);
+	mdelay(10000);
+}
+
+static void do_msm_poweroff(void)
+{
+	/* TODO: Add poweroff capability */
+	do_msm_restart(REBOOT_HARD, NULL);
+}
+
+static int msm_restart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *mem;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	msm_ps_hold = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(msm_ps_hold))
+		return PTR_ERR(msm_ps_hold);
+
+	pm_power_off = do_msm_poweroff;
+	arm_pm_restart = do_msm_restart;
+	return 0;
+}
+
+static const struct of_device_id of_msm_restart_match[] = {
+	{ .compatible = "qcom,pshold", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_msm_restart_match);
+
+static struct platform_driver msm_restart_driver = {
+	.probe = msm_restart_probe,
+	.driver = {
+		.name = "msm-restart",
+		.of_match_table = of_match_ptr(of_msm_restart_match),
+	},
+};
+
+static int __init msm_restart_init(void)
+{
+	return platform_driver_register(&msm_restart_driver);
+}
+device_initcall(msm_restart_init);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
new file mode 100644
index 0000000..ecd55f8
--- /dev/null
+++ b/drivers/power/reset/xgene-reboot.c
@@ -0,0 +1,103 @@
+/*
+ * AppliedMicro X-Gene SoC Reboot Driver
+ *
+ * Copyright (c) 2013, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ * Author: Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This driver provides system reboot functionality for APM X-Gene SoC.
+ * For system shutdown, this is board specific. If a board designer
+ * implements GPIO shutdown, use the gpio-poweroff.c driver.
+ */
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <asm/system_misc.h>
+
+struct xgene_reboot_context {
+	struct platform_device *pdev;
+	void __iomem *csr;
+	u32 mask;
+};
+
+static struct xgene_reboot_context *xgene_restart_ctx;
+
+static void xgene_restart(char str, const char *cmd)
+{
+	struct xgene_reboot_context *ctx = xgene_restart_ctx;
+	unsigned long timeout;
+
+	/* Issue the reboot */
+	if (ctx)
+		writel(ctx->mask, ctx->csr);
+
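+	/* Wait up to one second for the register write to take effect */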
+	timeout = jiffies + HZ;
+	while (time_before(jiffies, timeout))
+		cpu_relax();
+
+	dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+}
+
+static int xgene_reboot_probe(struct platform_device *pdev)
+{
+	struct xgene_reboot_context *ctx;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		dev_err(&pdev->dev, "out of memory for context\n");
+		return -ENOMEM;
+	}
+
+	ctx->csr = of_iomap(pdev->dev.of_node, 0);
+	if (!ctx->csr) {
+		devm_kfree(&pdev->dev, ctx);
+		dev_err(&pdev->dev, "cannot map resource\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+		ctx->mask = 0xFFFFFFFF;
+
+	ctx->pdev = pdev;
+	xgene_restart_ctx = ctx;
+	arm_pm_restart = xgene_restart;
+
+	return 0;
+}
+
+static struct of_device_id xgene_reboot_of_match[] = {
+	{ .compatible = "apm,xgene-reboot" },
+	{}
+};
+
+static struct platform_driver xgene_reboot_driver = {
+	.probe = xgene_reboot_probe,
+	.driver = {
+		.name = "xgene-reboot",
+		.of_match_table = xgene_reboot_of_match,
+	},
+};
+
+static int __init xgene_reboot_init(void)
+{
+	return platform_driver_register(&xgene_reboot_driver);
+}
+device_initcall(xgene_reboot_init);
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 8a6288d..1bc5857 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -25,6 +25,10 @@
 #include <linux/slab.h>
 #include <linux/i2c/twl4030-madc.h>
 
+/* RX51 specific channels */
+#define TWL4030_MADC_BTEMP_RX51	TWL4030_MADC_ADCIN0
+#define TWL4030_MADC_BCI_RX51	TWL4030_MADC_ADCIN4
+
 struct rx51_device_info {
 	struct device *dev;
 	struct power_supply bat;
@@ -37,7 +41,7 @@
 {
 	struct twl4030_madc_request req;
 
-	req.channels = 1 << channel;
+	req.channels = channel;
 	req.do_avg = 1;
 	req.method = TWL4030_MADC_SW1;
 	req.func_cb = NULL;
@@ -47,7 +51,7 @@
 	if (twl4030_madc_conversion(&req) <= 0)
 		return -ENODATA;
 
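+	/* 'channel' is a bitmask; rbuf is indexed by channel number */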
-	return req.rbuf[channel];
+	return req.rbuf[ffs(channel) - 1];
 }
 
 /*
@@ -56,7 +60,7 @@
  */
 static int rx51_battery_read_voltage(struct rx51_device_info *di)
 {
-	int voltage = rx51_battery_read_adc(12);
+	int voltage = rx51_battery_read_adc(TWL4030_MADC_VBAT);
 
 	if (voltage < 0)
 		return voltage;
@@ -108,7 +112,7 @@
 {
 	int min = 0;
 	int max = ARRAY_SIZE(rx51_temp_table2) - 1;
-	int raw = rx51_battery_read_adc(0);
+	int raw = rx51_battery_read_adc(TWL4030_MADC_BTEMP_RX51);
 
 	/* Zero and negative values are undefined */
 	if (raw <= 0)
@@ -142,7 +146,7 @@
  */
 static int rx51_battery_read_capacity(struct rx51_device_info *di)
 {
-	int capacity = rx51_battery_read_adc(4);
+	int capacity = rx51_battery_read_adc(TWL4030_MADC_BCI_RX51);
 
 	if (capacity < 0)
 		return capacity;
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 0224de5..f4d80df 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -150,7 +150,7 @@
 
 static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
 {
-	pr_info("tosa_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+	pr_info("tosa_bat_gpio irq\n");
 	schedule_work(&bat_work);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index be98e70..d98abe9 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -189,7 +189,12 @@
 
 		/* Need to keep regulator on */
 		if (!bci->usb_enabled) {
-			regulator_enable(bci->usb_reg);
+			ret = regulator_enable(bci->usb_reg);
+			if (ret) {
+				dev_err(bci->dev,
+					"Failed to enable regulator\n");
+				return ret;
+			}
 			bci->usb_enabled = 1;
 		}
 
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
new file mode 100644
index 0000000..7ef445a
--- /dev/null
+++ b/drivers/power/twl4030_madc_battery.c
@@ -0,0 +1,245 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Lukas Märdian <lukas@goldelico.com>
+ *
+ * Based on dumb driver for gta01 battery
+ * Copyright 2009 Openmoko, Inc
+ * Balaji Rao <balajirrao@openmoko.org>
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/power/twl4030_madc_battery.h>
+
+struct twl4030_madc_battery {
+	struct power_supply psy;
+	struct twl4030_madc_bat_platform_data *pdata;
+};
+
+static enum power_supply_property twl4030_madc_bat_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+};
+
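+/* 'index' is a channel bitmask; rbuf is indexed by channel number */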
+static int madc_read(int index)
+{
+	struct twl4030_madc_request req;
+	int val;
+
+	req.channels = index;
+	req.method = TWL4030_MADC_SW2;
+	req.type = TWL4030_MADC_WAIT;
+	req.do_avg = 0;
+	req.raw = false;
+	req.func_cb = NULL;
+
+	val = twl4030_madc_conversion(&req);
+	if (val < 0)
+		return val;
+
+	return req.rbuf[ffs(index) - 1];
+}
+
+static int twl4030_madc_bat_get_charging_status(void)
+{
+	return (madc_read(TWL4030_MADC_ICHG) > 0) ? 1 : 0;
+}
+
+static int twl4030_madc_bat_get_voltage(void)
+{
+	return madc_read(TWL4030_MADC_VBAT);
+}
+
+static int twl4030_madc_bat_get_current(void)
+{
+	return madc_read(TWL4030_MADC_ICHG) * 1000;
+}
+
+static int twl4030_madc_bat_get_temp(void)
+{
+	return madc_read(TWL4030_MADC_BTEMP) * 10;
+}
+
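+/*
+ * Map a voltage to a charge level by walking the calibration table
+ * (sorted by descending voltage, terminated by a negative voltage
+ * entry) and interpolating linearly between the surrounding points.
+ */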
+static int twl4030_madc_bat_voltscale(struct twl4030_madc_battery *bat,
+					int volt)
+{
+	struct twl4030_madc_bat_calibration *calibration;
+	int i, res = 0;
+
+	/* choose charging curve */
+	if (twl4030_madc_bat_get_charging_status())
+		calibration = bat->pdata->charging;
+	else
+		calibration = bat->pdata->discharging;
+
+	if (volt > calibration[0].voltage) {
+		res = calibration[0].level;
+	} else {
+		for (i = 0; calibration[i+1].voltage >= 0; i++) {
+			if (volt <= calibration[i].voltage &&
+					volt >= calibration[i+1].voltage) {
+				/* interval found - interpolate within range */
+				res = calibration[i].level -
+					((calibration[i].voltage - volt) *
+					(calibration[i].level -
+					calibration[i+1].level)) /
+					(calibration[i].voltage -
+					calibration[i+1].voltage);
+				break;
+			}
+		}
+	}
+	return res;
+}
+
+static int twl4030_madc_bat_get_property(struct power_supply *psy,
+					enum power_supply_property psp,
+					union power_supply_propval *val)
+{
+	struct twl4030_madc_battery *bat = container_of(psy,
+					struct twl4030_madc_battery, psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (twl4030_madc_bat_voltscale(bat,
+				twl4030_madc_bat_get_voltage()) > 95) {
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+		} else {
+			if (twl4030_madc_bat_get_charging_status())
+				val->intval = POWER_SUPPLY_STATUS_CHARGING;
+			else
+				val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		}
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = twl4030_madc_bat_get_voltage() * 1000;
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = twl4030_madc_bat_get_current();
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		/* assume battery is always present */
+		val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW: {
+			int percent = twl4030_madc_bat_voltscale(bat,
+					twl4030_madc_bat_get_voltage());
+			val->intval = (percent * bat->pdata->capacity) / 100;
+			break;
+		}
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = twl4030_madc_bat_voltscale(bat,
+					twl4030_madc_bat_get_voltage());
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		val->intval = bat->pdata->capacity;
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = twl4030_madc_bat_get_temp();
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: {
+			int percent = twl4030_madc_bat_voltscale(bat,
+					twl4030_madc_bat_get_voltage());
+			/* in mAh */
+			int chg = (percent * (bat->pdata->capacity/1000))/100;
+
+			/* assume discharge with 400 mA (ca. 1.5W) */
+			val->intval = (3600l * chg) / 400;
+			break;
+		}
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void twl4030_madc_bat_ext_changed(struct power_supply *psy)
+{
+	struct twl4030_madc_battery *bat = container_of(psy,
+					struct twl4030_madc_battery, psy);
+
+	power_supply_changed(&bat->psy);
+}
+
+static int twl4030_cmp(const void *a, const void *b)
+{
+	return ((struct twl4030_madc_bat_calibration *)b)->voltage -
+		((struct twl4030_madc_bat_calibration *)a)->voltage;
+}
+
+static int twl4030_madc_battery_probe(struct platform_device *pdev)
+{
+	struct twl4030_madc_battery *twl4030_madc_bat;
+	struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
+	int ret;
+
+	twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+	if (!twl4030_madc_bat)
+		return -ENOMEM;
+
+	twl4030_madc_bat->psy.name = "twl4030_battery";
+	twl4030_madc_bat->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+	twl4030_madc_bat->psy.properties = twl4030_madc_bat_props;
+	twl4030_madc_bat->psy.num_properties =
+					ARRAY_SIZE(twl4030_madc_bat_props);
+	twl4030_madc_bat->psy.get_property = twl4030_madc_bat_get_property;
+	twl4030_madc_bat->psy.external_power_changed =
+					twl4030_madc_bat_ext_changed;
+
+	/* sort charging and discharging calibration data */
+	sort(pdata->charging, pdata->charging_size,
+		sizeof(struct twl4030_madc_bat_calibration),
+		twl4030_cmp, NULL);
+	sort(pdata->discharging, pdata->discharging_size,
+		sizeof(struct twl4030_madc_bat_calibration),
+		twl4030_cmp, NULL);
+
+	twl4030_madc_bat->pdata = pdata;
+	platform_set_drvdata(pdev, twl4030_madc_bat);
+	ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
+	if (ret < 0)
+		kfree(twl4030_madc_bat);
+
+	return ret;
+}
+
+static int twl4030_madc_battery_remove(struct platform_device *pdev)
+{
+	struct twl4030_madc_battery *bat = platform_get_drvdata(pdev);
+
+	power_supply_unregister(&bat->psy);
+	kfree(bat);
+
+	return 0;
+}
+
+static struct platform_driver twl4030_madc_battery_driver = {
+	.driver = {
+		.name = "twl4030_madc_battery",
+	},
+	.probe  = twl4030_madc_battery_probe,
+	.remove = twl4030_madc_battery_remove,
+};
+module_platform_driver(twl4030_madc_battery_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lukas Märdian <lukas@goldelico.com>");
+MODULE_DESCRIPTION("twl4030_madc battery driver");
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index eae0eda..9966124 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -184,7 +184,6 @@
 {
 	struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
 
-	platform_set_drvdata(pdev, NULL);
 	pps_unregister_source(data->pps);
 	dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
 	return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 9e3498b..9654aa3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1249,6 +1249,15 @@
 	  Say "yes" here to support the real time clock on SiRF SOC chips.
 	  This driver can also be built as a module called rtc-sirfsoc.
 
+config RTC_DRV_MOXART
+	tristate "MOXA ART RTC"
+	help
+	  If you say yes here you get support for the MOXA ART
+	  RTC module.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-moxart.
+
 comment "HID Sensor RTC drivers"
 
 config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d3b4488..2dff3d2 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -130,3 +130,4 @@
 obj-$(CONFIG_RTC_DRV_WM8350)	+= rtc-wm8350.o
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
 obj-$(CONFIG_RTC_DRV_SIRFSOC)	+= rtc-sirfsoc.o
+obj-$(CONFIG_RTC_DRV_MOXART)	+= rtc-moxart.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index be06d71..24e733c 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1018,23 +1018,6 @@
 	cmos_do_remove(&pnp->dev);
 }
 
-#ifdef	CONFIG_PM
-
-static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
-{
-	return cmos_suspend(&pnp->dev);
-}
-
-static int cmos_pnp_resume(struct pnp_dev *pnp)
-{
-	return cmos_resume(&pnp->dev);
-}
-
-#else
-#define	cmos_pnp_suspend	NULL
-#define	cmos_pnp_resume		NULL
-#endif
-
 static void cmos_pnp_shutdown(struct pnp_dev *pnp)
 {
 	if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
@@ -1060,8 +1043,11 @@
 
 	/* flag ensures resume() gets called, and stops syslog spam */
 	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
-	.suspend	= cmos_pnp_suspend,
-	.resume		= cmos_pnp_resume,
+#ifdef CONFIG_PM_SLEEP
+	.driver		= {
+			.pm = &cmos_pm_ops,
+	},
+#endif
 };
 
 #endif	/* CONFIG_PNP */
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 308a8fe..bc7b4fc 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -89,7 +89,6 @@
 struct rtc_plat_data {
 	struct rtc_device *rtc;
 	void __iomem *ioaddr;		/* virtual base address */
-	int size;				/* amount of memory mapped */
 	int irq;
 	unsigned int irqen;
 	int alrm_sec;
@@ -479,20 +478,14 @@
 	struct rtc_plat_data *pdata;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	pdata->size = resource_size(res);
-	if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
-			pdev->name))
-		return -EBUSY;
-	ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
-	if (!ds1511_base)
-		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ds1511_base))
+		return PTR_ERR(ds1511_base);
 	pdata->ioaddr = ds1511_base;
 	pdata->irq = platform_get_irq(pdev, 0);
 
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 8c6c952..fd31571 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -285,19 +285,14 @@
 	void __iomem *ioaddr;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
-			pdev->name))
-		return -EBUSY;
 
-	ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
-	if (!ioaddr)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
 	pdata->ioaddr = ioaddr;
 	pdata->irq = platform_get_irq(pdev, 0);
 
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index eccdc62..17b73fd 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -52,11 +52,9 @@
 #define RTC_BATT_FLAG		0x80
 
 struct rtc_plat_data {
-	struct rtc_device *rtc;
 	void __iomem *ioaddr_nvram;
 	void __iomem *ioaddr_rtc;
 	size_t size_nvram;
-	size_t size;
 	unsigned long last_jiffies;
 	struct bin_attribute nvram_attr;
 };
@@ -117,11 +115,7 @@
 	/* year is 1900 + tm->tm_year */
 	tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
 
-	if (rtc_valid_tm(tm) < 0) {
-		dev_err(dev, "retrieved date/time is not valid.\n");
-		rtc_time_to_tm(0, tm);
-	}
-	return 0;
+	return rtc_valid_tm(tm);
 }
 
 static const struct rtc_class_ops ds1742_rtc_ops = {
@@ -168,22 +162,17 @@
 	void __iomem *ioaddr;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	pdata->size = resource_size(res);
-	if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
-		pdev->name))
-		return -EBUSY;
-	ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
-	if (!ioaddr)
-		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
 
 	pdata->ioaddr_nvram = ioaddr;
-	pdata->size_nvram = pdata->size - RTC_SIZE;
+	pdata->size_nvram = resource_size(res) - RTC_SIZE;
 	pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
 
 	sysfs_bin_attr_init(&pdata->nvram_attr);
@@ -212,7 +201,6 @@
 				  &ds1742_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
-	pdata->rtc = rtc;
 
 	ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
 
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 549b3c3..580e7b5 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -138,17 +138,9 @@
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENXIO;
-
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-
-	ep93xx_rtc->mmio_base = devm_ioremap(&pdev->dev, res->start,
-					     resource_size(res));
-	if (!ep93xx_rtc->mmio_base)
-		return -ENXIO;
+	ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ep93xx_rtc->mmio_base))
+		return PTR_ERR(ep93xx_rtc->mmio_base);
 
 	pdev->dev.platform_data = ep93xx_rtc;
 	platform_set_drvdata(pdev, ep93xx_rtc);
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 7273b01..4e2a818 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -23,10 +23,6 @@
 #include <linux/iio/iio.h>
 #include <linux/rtc.h>
 
-/* Format: HID-SENSOR-usage_id_in_hex */
-/* Usage ID from spec for Time: 0x2000A0 */
-#define DRIVER_NAME "HID-SENSOR-2000a0" /* must be lowercase */
-
 enum hid_time_channel {
 	CHANNEL_SCAN_INDEX_YEAR,
 	CHANNEL_SCAN_INDEX_MONTH,
@@ -283,9 +279,11 @@
 					"hid-sensor-time", &hid_time_rtc_ops,
 					THIS_MODULE);
 
-	if (IS_ERR(time_state->rtc)) {
+	if (IS_ERR_OR_NULL(time_state->rtc)) {
+		ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
+		time_state->rtc = NULL;
+		sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
 		dev_err(&pdev->dev, "rtc device register failed!\n");
-		return PTR_ERR(time_state->rtc);
 	}
 
 	return ret;
@@ -300,9 +298,19 @@
 	return 0;
 }
 
+static struct platform_device_id hid_time_ids[] = {
+	{
+		/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+		.name = "HID-SENSOR-2000a0",
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_time_ids);
+
 static struct platform_driver hid_time_platform_driver = {
+	.id_table = hid_time_ids,
 	.driver = {
-		.name	= DRIVER_NAME,
+		.name	= KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= hid_time_probe,
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d3a8c8e..abd7f90 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -375,24 +375,16 @@
 	struct imxdi_dev *imxdi;
 	int rc;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL);
 	if (!imxdi)
 		return -ENOMEM;
 
 	imxdi->pdev = pdev;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
-				pdev->name))
-		return -EBUSY;
-
-	imxdi->ioaddr = devm_ioremap(&pdev->dev, res->start,
-			resource_size(res));
-	if (imxdi->ioaddr == NULL)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	imxdi->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(imxdi->ioaddr))
+		return PTR_ERR(imxdi->ioaddr);
 
 	spin_lock_init(&imxdi->irq_lock);
 
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 8276ae9..bfdbcb8 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -201,16 +201,9 @@
 {
 	struct resource *res;
 	struct lpc32xx_rtc *rtc;
-	resource_size_t size;
 	int rtcirq;
 	u32 tmp;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Can't get memory resource\n");
-		return -ENOENT;
-	}
-
 	rtcirq = platform_get_irq(pdev, 0);
 	if (rtcirq < 0 || rtcirq >= NR_IRQS) {
 		dev_warn(&pdev->dev, "Can't get interrupt resource\n");
@@ -224,19 +217,10 @@
 	}
 	rtc->irq = rtcirq;
 
-	size = resource_size(res);
-
-	if (!devm_request_mem_region(&pdev->dev, res->start, size,
-				     pdev->name)) {
-		dev_err(&pdev->dev, "RTC registers are not free\n");
-		return -EBUSY;
-	}
-
-	rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
-	if (!rtc->rtc_base) {
-		dev_err(&pdev->dev, "Can't map memory\n");
-		return -ENOMEM;
-	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rtc->rtc_base))
+		return PTR_ERR(rtc->rtc_base);
 
 	spin_lock_init(&rtc->lock);
 
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 9915cb9..9efe118 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -240,9 +240,9 @@
 	}
 
 	alrm->pending = 0;
-	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
 	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+		dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
 				__func__, __LINE__, ret);
 		goto out;
 	}
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
new file mode 100644
index 0000000..c29dee0
--- /dev/null
+++ b/drivers/rtc/rtc-moxart.c
@@ -0,0 +1,330 @@
+/*
+ * MOXA ART RTC driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#define GPIO_RTC_RESERVED			0x0C
+#define GPIO_RTC_DATA_SET			0x10
+#define GPIO_RTC_DATA_CLEAR			0x14
+#define GPIO_RTC_PIN_PULL_ENABLE		0x18
+#define GPIO_RTC_PIN_PULL_TYPE			0x1C
+#define GPIO_RTC_INT_ENABLE			0x20
+#define GPIO_RTC_INT_RAW_STATE			0x24
+#define GPIO_RTC_INT_MASKED_STATE		0x28
+#define GPIO_RTC_INT_MASK			0x2C
+#define GPIO_RTC_INT_CLEAR			0x30
+#define GPIO_RTC_INT_TRIGGER			0x34
+#define GPIO_RTC_INT_BOTH			0x38
+#define GPIO_RTC_INT_RISE_NEG			0x3C
+#define GPIO_RTC_BOUNCE_ENABLE			0x40
+#define GPIO_RTC_BOUNCE_PRE_SCALE		0x44
+#define GPIO_RTC_PROTECT_W			0x8E
+#define GPIO_RTC_PROTECT_R			0x8F
+#define GPIO_RTC_YEAR_W				0x8C
+#define GPIO_RTC_YEAR_R				0x8D
+#define GPIO_RTC_DAY_W				0x8A
+#define GPIO_RTC_DAY_R				0x8B
+#define GPIO_RTC_MONTH_W			0x88
+#define GPIO_RTC_MONTH_R			0x89
+#define GPIO_RTC_DATE_W				0x86
+#define GPIO_RTC_DATE_R				0x87
+#define GPIO_RTC_HOURS_W			0x84
+#define GPIO_RTC_HOURS_R			0x85
+#define GPIO_RTC_MINUTES_W			0x82
+#define GPIO_RTC_MINUTES_R			0x83
+#define GPIO_RTC_SECONDS_W			0x80
+#define GPIO_RTC_SECONDS_R			0x81
+#define GPIO_RTC_DELAY_TIME			8
+
+struct moxart_rtc {
+	struct rtc_device *rtc;
+	spinlock_t rtc_lock;
+	int gpio_data, gpio_sclk, gpio_reset;
+};
+
+static int day_of_year[12] =	{ 0, 31, 59, 90, 120, 151, 181,
+				  212, 243, 273, 304, 334 };
+
+static void moxart_rtc_write_byte(struct device *dev, u8 data)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < 8; i++, data >>= 1) {
+		gpio_set_value(moxart_rtc->gpio_sclk, 0);
+		gpio_set_value(moxart_rtc->gpio_data, ((data & 1) == 1));
+		udelay(GPIO_RTC_DELAY_TIME);
+		gpio_set_value(moxart_rtc->gpio_sclk, 1);
+		udelay(GPIO_RTC_DELAY_TIME);
+	}
+}
+
+static u8 moxart_rtc_read_byte(struct device *dev)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	int i;
+	u8 data = 0;
+
+	for (i = 0; i < 8; i++) {
+		gpio_set_value(moxart_rtc->gpio_sclk, 0);
+		udelay(GPIO_RTC_DELAY_TIME);
+		gpio_set_value(moxart_rtc->gpio_sclk, 1);
+		udelay(GPIO_RTC_DELAY_TIME);
+		if (gpio_get_value(moxart_rtc->gpio_data))
+			data |= (1 << i);
+		udelay(GPIO_RTC_DELAY_TIME);
+	}
+	return data;
+}
+
+static u8 moxart_rtc_read_register(struct device *dev, u8 cmd)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	u8 data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	gpio_direction_output(moxart_rtc->gpio_data, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 1);
+	udelay(GPIO_RTC_DELAY_TIME);
+	moxart_rtc_write_byte(dev, cmd);
+	gpio_direction_input(moxart_rtc->gpio_data);
+	udelay(GPIO_RTC_DELAY_TIME);
+	data = moxart_rtc_read_byte(dev);
+	gpio_set_value(moxart_rtc->gpio_sclk, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 0);
+	udelay(GPIO_RTC_DELAY_TIME);
+
+	local_irq_restore(flags);
+
+	return data;
+}
+
+static void moxart_rtc_write_register(struct device *dev, u8 cmd, u8 data)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	gpio_direction_output(moxart_rtc->gpio_data, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 1);
+	udelay(GPIO_RTC_DELAY_TIME);
+	moxart_rtc_write_byte(dev, cmd);
+	moxart_rtc_write_byte(dev, data);
+	gpio_set_value(moxart_rtc->gpio_sclk, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 0);
+	udelay(GPIO_RTC_DELAY_TIME);
+
+	local_irq_restore(flags);
+}
+
+static int moxart_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+
+	spin_lock_irq(&moxart_rtc->rtc_lock);
+
+	moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0);
+	moxart_rtc_write_register(dev, GPIO_RTC_YEAR_W,
+				  (((tm->tm_year - 100) / 10) << 4) |
+				  ((tm->tm_year - 100) % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_MONTH_W,
+				  (((tm->tm_mon + 1) / 10) << 4) |
+				  ((tm->tm_mon + 1) % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_DATE_W,
+				  ((tm->tm_mday / 10) << 4) |
+				  (tm->tm_mday % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_HOURS_W,
+				  ((tm->tm_hour / 10) << 4) |
+				  (tm->tm_hour % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_MINUTES_W,
+				  ((tm->tm_min / 10) << 4) |
+				  (tm->tm_min % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_SECONDS_W,
+				  ((tm->tm_sec / 10) << 4) |
+				  (tm->tm_sec % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0x80);
+
+	spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+	dev_dbg(dev, "%s: success tm_year=%d tm_mon=%d "
+		"tm_mday=%d tm_hour=%d tm_min=%d tm_sec=%d\n",
+		__func__, tm->tm_year, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	return 0;
+}
+
+static int moxart_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	unsigned char v;
+
+	spin_lock_irq(&moxart_rtc->rtc_lock);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_SECONDS_R);
+	tm->tm_sec = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_MINUTES_R);
+	tm->tm_min = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_HOURS_R);
+	if (v & 0x80) { /* 12-hour mode */
+		tm->tm_hour = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+		if (v & 0x20) { /* PM mode */
+			tm->tm_hour += 12;
+			if (tm->tm_hour >= 24)
+				tm->tm_hour = 0;
+		}
+	} else { /* 24-hour mode */
+		tm->tm_hour = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+	}
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_DATE_R);
+	tm->tm_mday = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_MONTH_R);
+	tm->tm_mon = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+	tm->tm_mon--;
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_YEAR_R);
+	tm->tm_year = (((v & 0xF0) >> 4) * 10) + (v & 0x0F);
+	tm->tm_year += 100;
+	if (tm->tm_year <= 69)
+		tm->tm_year += 100;
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_DAY_R);
+	tm->tm_wday = (v & 0x0f) - 1;
+	tm->tm_yday = day_of_year[tm->tm_mon];
+	tm->tm_yday += (tm->tm_mday - 1);
+	if (tm->tm_mon >= 2) {
+		if (!(tm->tm_year % 4) && (tm->tm_year % 100))
+			tm->tm_yday++;
+	}
+
+	tm->tm_isdst = 0;
+
+	spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+	return 0;
+}
+
+static const struct rtc_class_ops moxart_rtc_ops = {
+	.read_time	= moxart_rtc_read_time,
+	.set_time	= moxart_rtc_set_time,
+};
+
+static int moxart_rtc_probe(struct platform_device *pdev)
+{
+	struct moxart_rtc *moxart_rtc;
+	int ret = 0;
+
+	moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL);
+	if (!moxart_rtc) {
+		dev_err(&pdev->dev, "devm_kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node,
+						  "gpio-rtc-data", 0);
+	if (!gpio_is_valid(moxart_rtc->gpio_data)) {
+		dev_err(&pdev->dev, "invalid gpio (data): %d\n",
+			moxart_rtc->gpio_data);
+		return moxart_rtc->gpio_data;
+	}
+
+	moxart_rtc->gpio_sclk = of_get_named_gpio(pdev->dev.of_node,
+						  "gpio-rtc-sclk", 0);
+	if (!gpio_is_valid(moxart_rtc->gpio_sclk)) {
+		dev_err(&pdev->dev, "invalid gpio (sclk): %d\n",
+			moxart_rtc->gpio_sclk);
+		return moxart_rtc->gpio_sclk;
+	}
+
+	moxart_rtc->gpio_reset = of_get_named_gpio(pdev->dev.of_node,
+						   "gpio-rtc-reset", 0);
+	if (!gpio_is_valid(moxart_rtc->gpio_reset)) {
+		dev_err(&pdev->dev, "invalid gpio (reset): %d\n",
+			moxart_rtc->gpio_reset);
+		return moxart_rtc->gpio_reset;
+	}
+
+	spin_lock_init(&moxart_rtc->rtc_lock);
+	platform_set_drvdata(pdev, moxart_rtc);
+
+	ret = devm_gpio_request(&pdev->dev, moxart_rtc->gpio_data, "rtc_data");
+	if (ret) {
+		dev_err(&pdev->dev, "can't get rtc_data gpio\n");
+		return ret;
+	}
+
+	ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_sclk,
+				    GPIOF_DIR_OUT, "rtc_sclk");
+	if (ret) {
+		dev_err(&pdev->dev, "can't get rtc_sclk gpio\n");
+		return ret;
+	}
+
+	ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_reset,
+				    GPIOF_DIR_OUT, "rtc_reset");
+	if (ret) {
+		dev_err(&pdev->dev, "can't get rtc_reset gpio\n");
+		return ret;
+	}
+
+	moxart_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+						   &moxart_rtc_ops,
+						   THIS_MODULE);
+	if (IS_ERR(moxart_rtc->rtc)) {
+		dev_err(&pdev->dev, "devm_rtc_device_register failed\n");
+		return PTR_ERR(moxart_rtc->rtc);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id moxart_rtc_match[] = {
+	{ .compatible = "moxa,moxart-rtc" },
+	{ },
+};
+
+static struct platform_driver moxart_rtc_driver = {
+	.probe	= moxart_rtc_probe,
+	.driver	= {
+		.name		= "moxart-rtc",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_rtc_match,
+	},
+};
+module_platform_driver(moxart_rtc_driver);
+
+MODULE_DESCRIPTION("MOXA ART RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
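[Note: moxart_rtc_set_time() and moxart_rtc_read_time() above pack and unpack the hardware registers as BCD by hand. The same arithmetic, pulled out as helpers for clarity; the names are ours, and the kernel's <linux/bcd.h> already provides equivalent bin2bcd()/bcd2bin():

	static inline u8 to_bcd(int val)	/* e.g. 59 -> 0x59 */
	{
		return ((val / 10) << 4) | (val % 10);
	}

	static inline int from_bcd(u8 val)	/* e.g. 0x59 -> 59 */
	{
		return ((val >> 4) * 10) + (val & 0x0f);
	}
]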
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index baab802..d536c59 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -221,26 +221,17 @@
 {
 	struct resource *res;
 	struct rtc_plat_data *pdata;
-	resource_size_t size;
 	u32 rtc_time;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
-	size = resource_size(res);
-	if (!devm_request_mem_region(&pdev->dev, res->start, size,
-				     pdev->name))
-		return -EBUSY;
-
-	pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, size);
-	if (!pdata->ioaddr)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->ioaddr))
+		return PTR_ERR(pdata->ioaddr);
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	/* Not all SoCs require a clock.*/
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index ab87bac..50c5726 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -377,22 +377,16 @@
 	unsigned long rate;
 	int ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
 	pdata->devtype = pdev->id_entry->driver_data;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-
-	pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
-				     resource_size(res));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->ioaddr))
+		return PTR_ERR(pdata->ioaddr);
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pdata->clk)) {
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 22861c5..248653c 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -99,7 +99,7 @@
 	if (!timeout)
 		return ERR_PTR(-EPERM);
 
-	return 0;
+	return NULL;
 }
 
 static int nuc900_rtc_bcd2bin(unsigned int timereg,
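[Note: the nuc900 fix matters because the function returns a pointer: on timeout it hands back ERR_PTR(-EPERM), callers test with IS_ERR(), and the success value must be NULL rather than a literal 0 (sparse warns about using a plain integer as a NULL pointer). A sketch of the convention, with check_ready() as a made-up stand-in for the wait helper; assumes <linux/err.h>:

	static void *check_ready(int timeout)
	{
		if (!timeout)
			return ERR_PTR(-EPERM);	/* errno encoded in the pointer */
		return NULL;			/* success, as in the fix */
	}

	/* caller side */
	p = check_ready(t);
	if (IS_ERR(p))
		return PTR_ERR(p);
]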
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c6ffbae..c7d97ee 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,8 @@
 #define OMAP_RTC_KICK0_REG		0x6c
 #define OMAP_RTC_KICK1_REG		0x70
 
+#define OMAP_RTC_IRQWAKEEN		0x7c
+
 /* OMAP_RTC_CTRL_REG bit fields: */
 #define OMAP_RTC_CTRL_SPLIT		(1<<7)
 #define OMAP_RTC_CTRL_DISABLE		(1<<6)
@@ -94,12 +96,21 @@
 #define OMAP_RTC_INTERRUPTS_IT_ALARM    (1<<3)
 #define OMAP_RTC_INTERRUPTS_IT_TIMER    (1<<2)
 
+/* OMAP_RTC_IRQWAKEEN bit fields: */
+#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN    (1<<1)
+
 /* OMAP_RTC_KICKER values */
 #define	KICK0_VALUE			0x83e70b13
 #define	KICK1_VALUE			0x95a4f1e0
 
 #define	OMAP_RTC_HAS_KICKER		0x1
 
+/*
+ * A few RTC IP revisions have a special WAKE-EN register to enable
+ * wakeup generation for the alarm event.
+ */
+#define	OMAP_RTC_HAS_IRQWAKEEN		0x2
+
 static void __iomem	*rtc_base;
 
 #define rtc_read(addr)		readb(rtc_base + (addr))
@@ -299,12 +310,18 @@
 static int omap_rtc_alarm;
 static int omap_rtc_timer;
 
-#define	OMAP_RTC_DATA_DA830_IDX	1
+#define	OMAP_RTC_DATA_AM3352_IDX	1
+#define	OMAP_RTC_DATA_DA830_IDX		2
 
 static struct platform_device_id omap_rtc_devtype[] = {
 	{
 		.name	= DRIVER_NAME,
-	}, {
+	},
+	[OMAP_RTC_DATA_AM3352_IDX] = {
+		.name	= "am3352-rtc",
+		.driver_data = OMAP_RTC_HAS_KICKER | OMAP_RTC_HAS_IRQWAKEEN,
+	},
+	[OMAP_RTC_DATA_DA830_IDX] = {
 		.name	= "da830-rtc",
 		.driver_data = OMAP_RTC_HAS_KICKER,
 	},
@@ -316,6 +333,9 @@
 	{	.compatible	= "ti,da830-rtc",
 		.data		= &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX],
 	},
+	{	.compatible	= "ti,am3352-rtc",
+		.data		= &omap_rtc_devtype[OMAP_RTC_DATA_AM3352_IDX],
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
@@ -464,16 +484,28 @@
 
 static int omap_rtc_suspend(struct device *dev)
 {
+	u8 irqwake_stat;
+	struct platform_device *pdev = to_platform_device(dev);
+	const struct platform_device_id *id_entry =
+					platform_get_device_id(pdev);
+
 	irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
 
 	/* FIXME the RTC alarm is not currently acting as a wakeup event
-	 * source, and in fact this enable() call is just saving a flag
-	 * that's never used...
+	 * source on some platforms, and in fact this enable() call is just
+	 * saving a flag that's never used...
 	 */
-	if (device_may_wakeup(dev))
+	if (device_may_wakeup(dev)) {
 		enable_irq_wake(omap_rtc_alarm);
-	else
+
+		if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+			irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+			irqwake_stat |= OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+			rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+		}
+	} else {
 		rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+	}
 
 	/* Disable the clock/module */
 	pm_runtime_put_sync(dev);
@@ -483,13 +515,25 @@
 
 static int omap_rtc_resume(struct device *dev)
 {
+	u8 irqwake_stat;
+	struct platform_device *pdev = to_platform_device(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(pdev);
+
 	/* Enable the clock/module so that we can access the registers */
 	pm_runtime_get_sync(dev);
 
-	if (device_may_wakeup(dev))
+	if (device_may_wakeup(dev)) {
 		disable_irq_wake(omap_rtc_alarm);
-	else
+
+		if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+			irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+			irqwake_stat &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+			rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+		}
+	} else {
 		rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+	}
 	return 0;
 }
 #endif
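[Note: on the AM3352 variant the alarm only wakes the SoC if ALARM_WAKEEN is set in the new IRQWAKEEN register, and both PM callbacks above perform the same read-modify-write on it. A hypothetical helper that would factor that out, not part of the patch; rtc_read()/rtc_write() are the driver's own accessors:

	static void rtc_alarm_wakeen(bool enable)
	{
		u8 val = rtc_read(OMAP_RTC_IRQWAKEEN);

		if (enable)
			val |= OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
		else
			val &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
		rtc_write(val, OMAP_RTC_IRQWAKEEN);
	}
]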
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index a1fecc8..fffb7d3 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -238,6 +238,15 @@
 	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
 	struct palmas_rtc *palmas_rtc = NULL;
 	int ret;
+	bool enable_bb_charging = false;
+	bool high_bb_charging;
+
+	if (pdev->dev.of_node) {
+		enable_bb_charging = of_property_read_bool(pdev->dev.of_node,
+					"ti,backup-battery-chargeable");
+		high_bb_charging = of_property_read_bool(pdev->dev.of_node,
+					"ti,backup-battery-charge-high-current");
+	}
 
 	palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
 			GFP_KERNEL);
@@ -254,6 +263,32 @@
 	palmas_rtc->dev = &pdev->dev;
 	platform_set_drvdata(pdev, palmas_rtc);
 
+	if (enable_bb_charging) {
+		unsigned reg = PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG;
+
+		if (high_bb_charging)
+			reg = 0;
+
+		ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+			PALMAS_BACKUP_BATTERY_CTRL,
+			PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, reg);
+		if (ret < 0) {
+			dev_err(&pdev->dev,
+				"BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+			return ret;
+		}
+
+		ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+			PALMAS_BACKUP_BATTERY_CTRL,
+			PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN,
+			PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN);
+		if (ret < 0) {
+			dev_err(&pdev->dev,
+				"BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+			return ret;
+		}
+	}
+
 	/* Start RTC */
 	ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
 			PALMAS_RTC_CTRL_REG_STOP_RTC,
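[Note: the backup-battery block above selects the charge current before flipping the enable bit, presumably so the charger never runs with an unconfigured current; both writes go through palmas_update_bits(), so only the named field is touched. Condensed sketch of the sequence; high_current stands in for the DT-derived flag and error handling is elided:

	unsigned int ichrg = high_current ?
		0 : PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG;

	/* 1. pick the charge current */
	palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
			   PALMAS_BACKUP_BATTERY_CTRL,
			   PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, ichrg);
	/* 2. only then enable charging */
	palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
			   PALMAS_BACKUP_BATTERY_CTRL,
			   PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN,
			   PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN);
]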
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 205b9f7..1ee514a 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -203,11 +203,6 @@
 	return 0;
 }
 
-static int pcf2127_remove(struct i2c_client *client)
-{
-	return 0;
-}
-
 static const struct i2c_device_id pcf2127_id[] = {
 	{ "pcf2127", 0 },
 	{ }
@@ -229,7 +224,6 @@
 		.of_match_table = of_match_ptr(pcf2127_of_match),
 	},
 	.probe		= pcf2127_probe,
-	.remove		= pcf2127_remove,
 	.id_table	= pcf2127_id,
 };
 
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index aa7ed4b..63460cf 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -44,6 +44,7 @@
 	struct rtc_device	*rtc;
 	u32			rtc_base;
 	u32			irq;
+	unsigned		irq_wake;
 	/* Overflow for every 8 years extra time */
 	u32			overflow_rtc;
 #ifdef CONFIG_PM
@@ -355,8 +356,8 @@
 	rtcdrv->saved_counter =
 		sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
 	rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
-	if (device_may_wakeup(&pdev->dev))
-		enable_irq_wake(rtcdrv->irq);
+	if (device_may_wakeup(&pdev->dev) && !enable_irq_wake(rtcdrv->irq))
+		rtcdrv->irq_wake = 1;
 
 	return 0;
 }
@@ -423,8 +424,10 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
 	sirfsoc_rtc_thaw(dev);
-	if (device_may_wakeup(&pdev->dev))
+	if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
 		disable_irq_wake(rtcdrv->irq);
+		rtcdrv->irq_wake = 0;
+	}
 
 	return 0;
 }
@@ -434,8 +437,10 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
 
-	if (device_may_wakeup(&pdev->dev))
+	if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
 		disable_irq_wake(rtcdrv->irq);
+		rtcdrv->irq_wake = 0;
+	}
 	return 0;
 }
 
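[Note: enable_irq_wake() can fail, and calling disable_irq_wake() on an IRQ whose wake depth was never raised triggers the kernel's "Unbalanced IRQ ... wake disable" warning; the sirfsoc fix therefore records a successful enable in irq_wake and only undoes what it actually did. The same guard as a generic sketch, with illustrative foo_* names and drv standing for the driver state that holds irq and irq_wake:

	static int foo_suspend(struct device *dev)
	{
		if (device_may_wakeup(dev) && !enable_irq_wake(drv->irq))
			drv->irq_wake = 1;	/* remember success only */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		if (device_may_wakeup(dev) && drv->irq_wake) {
			disable_irq_wake(drv->irq);	/* balanced by the flag */
			drv->irq_wake = 0;
		}
		return 0;
	}
]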
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index af5e97e..a176ba6 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -294,19 +294,14 @@
 	void __iomem *ioaddr;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
-			pdev->name))
-		return -EBUSY;
-	ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
-	if (!ioaddr)
-		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
 	pdata->ioaddr = ioaddr;
 	pdata->irq = platform_get_irq(pdev, 0);
 
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index f9a0677..4f87234 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -244,9 +244,6 @@
 	struct resource *res;
 	int irq, ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return -ENODEV;
@@ -255,13 +252,10 @@
 		return -ENOMEM;
 	platform_set_drvdata(pdev, pdata);
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-	pdata->rtcreg = devm_ioremap(&pdev->dev, res->start,
-				     resource_size(res));
-	if (!pdata->rtcreg)
-		return -EBUSY;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->rtcreg))
+		return PTR_ERR(pdata->rtcreg);
 
 	spin_lock_init(&pdata->lock);
 	tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index feca317..92bd22c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -645,7 +645,7 @@
 	}
 	ASCEBC(dasd_diag_discipline.ebcname, 4);
 
-	service_subclass_irq_register();
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	register_external_interrupt(0x2603, dasd_ext_handler);
 	dasd_diag_discipline_pointer = &dasd_diag_discipline;
 	return 0;
@@ -655,7 +655,7 @@
 dasd_diag_cleanup(void)
 {
 	unregister_external_interrupt(0x2603, dasd_ext_handler);
-	service_subclass_irq_unregister();
+	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	dasd_diag_discipline_pointer = NULL;
 }
 
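[Note: the same irq_subclass_register()/irq_subclass_unregister() substitution recurs in sclp.c and kvm_virtio.c below; the helpers now take the subclass explicitly instead of being hard-wired to the service signal. Cleanup mirrors init in reverse order, unhooking the external-interrupt handler before dropping interest in the subclass, sketched here with illustrative foo_* names:

	static int __init foo_init(void)
	{
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		register_external_interrupt(0x2603, foo_ext_handler);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		unregister_external_interrupt(0x2603, foo_ext_handler);
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
	}
]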
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 96e52bf..f93cc32 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -524,20 +524,20 @@
 	.llseek		= no_llseek,
 };
 
-void fs3270_create_cb(int minor)
+static void fs3270_create_cb(int minor)
 {
 	__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
 	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
 		      NULL, "3270/tub%d", minor);
 }
 
-void fs3270_destroy_cb(int minor)
+static void fs3270_destroy_cb(int minor)
 {
 	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
 	__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
 }
 
-struct raw3270_notifier fs3270_notifier =
+static struct raw3270_notifier fs3270_notifier =
 {
 	.create = fs3270_create_cb,
 	.destroy = fs3270_destroy_cb,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3e4fb4e..a3aa374 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -910,12 +910,12 @@
 		spin_unlock_irqrestore(&sclp_lock, flags);
 		/* Enable service-signal interruption - needs to happen
 		 * with IRQs enabled. */
-		service_subclass_irq_register();
+		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 		/* Wait for signal from interrupt or timeout */
 		sclp_sync_wait();
 		/* Disable service-signal interruption - needs to happen
 		 * with IRQs enabled. */
-		service_subclass_irq_unregister();
+		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
 		spin_lock_irqsave(&sclp_lock, flags);
 		del_timer(&sclp_request_timer);
 		if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1131,7 +1131,7 @@
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	/* Enable service-signal external interruption - needs to happen with
 	 * IRQs enabled. */
-	service_subclass_irq_register();
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	sclp_init_mask(1);
 	return 0;
 
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index cee69da..a0f47c8 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1845,17 +1845,17 @@
 	.set_termios = tty3270_set_termios
 };
 
-void tty3270_create_cb(int minor)
+static void tty3270_create_cb(int minor)
 {
 	tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
 }
 
-void tty3270_destroy_cb(int minor)
+static void tty3270_destroy_cb(int minor)
 {
 	tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
 }
 
-struct raw3270_notifier tty3270_notifier =
+static struct raw3270_notifier tty3270_notifier =
 {
 	.create = tty3270_create_cb,
 	.destroy = tty3270_destroy_cb,
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 9e5e146..794820a 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -30,8 +30,8 @@
 
 #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
 
-#define TO_USER		0
-#define TO_KERNEL	1
+#define TO_USER		1
+#define TO_KERNEL	0
 #define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */
 
 enum arch_id {
@@ -73,7 +73,7 @@
  * @count: Size of buffer, which should be copied
  * @mode:  Either TO_KERNEL or TO_USER
  */
-static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
 {
 	int offs, blk_num;
 	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
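[Note: memcpy_hsa() dispatches on the mode flag, so the swap above makes TO_USER the non-zero value; the function also loses its static qualifier so other zcore code can call it. A minimal dispatch consistent with the new definitions; copy_one() is a made-up name, and the snippet assumes <linux/uaccess.h> and <linux/string.h>:

	static int copy_one(void *dest, const void *src, size_t n, int mode)
	{
		if (mode == TO_USER) {
			if (copy_to_user((void __user *) dest, src, n))
				return -EFAULT;
			return 0;
		}
		memcpy(dest, src, n);
		return 0;
	}
]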
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index d4174b8..02300dc 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -413,7 +413,7 @@
 	register unsigned long reg2 asm ("2") = (unsigned long) msg;
 	register unsigned long reg3 asm ("3") = (unsigned long) length;
 	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
-	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
 
 	if (special == 1)
 		reg0 |= 0x400000UL;
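[Note: both forms produce the same low 32 bits of psmid; the explicit mask simply states the truncation instead of relying on a narrowing cast. For comparison, outside the inline asm; split_psmid() is illustrative:

	static inline void split_psmid(u64 psmid,
				       unsigned long *hi, unsigned long *lo)
	{
		*hi = (unsigned int) (psmid >> 32);
		*lo = psmid & 0xffffffff;	/* same bits as (unsigned int) psmid */
	}
]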
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2ea6165..af2166f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -472,7 +472,7 @@
 
 	INIT_WORK(&hotplug_work, hotplug_devices);
 
-	service_subclass_irq_register();
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	register_external_interrupt(0x2603, kvm_extint_handler);
 
 	scan_devices();
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6488a73..7e8346e 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -38,14 +38,6 @@
 #include "acornfb.h"
 
 /*
- * VIDC machines can't do 16 or 32BPP modes.
- */
-#ifdef HAS_VIDC
-#undef FBCON_HAS_CFB16
-#undef FBCON_HAS_CFB32
-#endif
-
-/*
  * Default resolution.
  * NOTE that it has to be supported in the table towards
  * the end of this file.
@@ -106,238 +98,6 @@
 
 extern unsigned int vram_size;	/* set by setup.c */
 
-#ifdef HAS_VIDC
-
-#define MAX_SIZE	480*1024
-
-/* CTL     VIDC	Actual
- * 24.000  0	 8.000
- * 25.175  0	 8.392
- * 36.000  0	12.000
- * 24.000  1	12.000
- * 25.175  1	12.588
- * 24.000  2	16.000
- * 25.175  2	16.783
- * 36.000  1	18.000
- * 24.000  3	24.000
- * 36.000  2	24.000
- * 25.175  3	25.175
- * 36.000  3	36.000
- */
-struct pixclock {
-	u_long	min_clock;
-	u_long	max_clock;
-	u_int	vidc_ctl;
-	u_int	vid_ctl;
-};
-
-static struct pixclock arc_clocks[] = {
-	/* we allow +/-1% on these */
-	{ 123750, 126250, VIDC_CTRL_DIV3,   VID_CTL_24MHz },	/*  8.000MHz */
-	{  82500,  84167, VIDC_CTRL_DIV2,   VID_CTL_24MHz },	/* 12.000MHz */
-	{  61875,  63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz },	/* 16.000MHz */
-	{  41250,  42083, VIDC_CTRL_DIV1,   VID_CTL_24MHz },	/* 24.000MHz */
-};
-
-static struct pixclock *
-acornfb_valid_pixrate(struct fb_var_screeninfo *var)
-{
-	u_long pixclock = var->pixclock;
-	u_int i;
-
-	if (!var->pixclock)
-		return NULL;
-
-	for (i = 0; i < ARRAY_SIZE(arc_clocks); i++)
-		if (pixclock > arc_clocks[i].min_clock &&
-		    pixclock < arc_clocks[i].max_clock)
-			return arc_clocks + i;
-
-	return NULL;
-}
-
-/* VIDC Rules:
- * hcr  : must be even (interlace, hcr/2 must be even)
- * hswr : must be even
- * hdsr : must be odd
- * hder : must be odd
- *
- * vcr  : must be odd
- * vswr : >= 1
- * vdsr : >= 1
- * vder : >= vdsr
- * if interlaced, then hcr/2 must be even
- */
-static void
-acornfb_set_timing(struct fb_var_screeninfo *var)
-{
-	struct pixclock *pclk;
-	struct vidc_timing vidc;
-	u_int horiz_correction;
-	u_int sync_len, display_start, display_end, cycle;
-	u_int is_interlaced;
-	u_int vid_ctl, vidc_ctl;
-	u_int bandwidth;
-
-	memset(&vidc, 0, sizeof(vidc));
-
-	pclk = acornfb_valid_pixrate(var);
-	vidc_ctl = pclk->vidc_ctl;
-	vid_ctl  = pclk->vid_ctl;
-
-	bandwidth = var->pixclock * 8 / var->bits_per_pixel;
-	/* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */
-	if (bandwidth > 143500)
-		vidc_ctl |= VIDC_CTRL_FIFO_3_7;
-	else if (bandwidth > 71750)
-		vidc_ctl |= VIDC_CTRL_FIFO_2_6;
-	else if (bandwidth > 35875)
-		vidc_ctl |= VIDC_CTRL_FIFO_1_5;
-	else
-		vidc_ctl |= VIDC_CTRL_FIFO_0_4;
-
-	switch (var->bits_per_pixel) {
-	case 1:
-		horiz_correction = 19;
-		vidc_ctl |= VIDC_CTRL_1BPP;
-		break;
-
-	case 2:
-		horiz_correction = 11;
-		vidc_ctl |= VIDC_CTRL_2BPP;
-		break;
-
-	case 4:
-		horiz_correction = 7;
-		vidc_ctl |= VIDC_CTRL_4BPP;
-		break;
-
-	default:
-	case 8:
-		horiz_correction = 5;
-		vidc_ctl |= VIDC_CTRL_8BPP;
-		break;
-	}
-
-	if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */
-		vidc_ctl |= VIDC_CTRL_CSYNC;
-	else {
-		if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
-			vid_ctl |= VID_CTL_HS_NHSYNC;
-
-		if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
-			vid_ctl |= VID_CTL_VS_NVSYNC;
-	}
-
-	sync_len	= var->hsync_len;
-	display_start	= sync_len + var->left_margin;
-	display_end	= display_start + var->xres;
-	cycle		= display_end + var->right_margin;
-
-	/* if interlaced, then hcr/2 must be even */
-	is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED;
-
-	if (is_interlaced) {
-		vidc_ctl |= VIDC_CTRL_INTERLACE;
-		if (cycle & 2) {
-			cycle += 2;
-			var->right_margin += 2;
-		}
-	}
-
-	vidc.h_cycle		= (cycle - 2) / 2;
-	vidc.h_sync_width	= (sync_len - 2) / 2;
-	vidc.h_border_start	= (display_start - 1) / 2;
-	vidc.h_display_start	= (display_start - horiz_correction) / 2;
-	vidc.h_display_end	= (display_end - horiz_correction) / 2;
-	vidc.h_border_end	= (display_end - 1) / 2;
-	vidc.h_interlace	= (vidc.h_cycle + 1) / 2;
-
-	sync_len	= var->vsync_len;
-	display_start	= sync_len + var->upper_margin;
-	display_end	= display_start + var->yres;
-	cycle		= display_end + var->lower_margin;
-
-	if (is_interlaced)
-		cycle = (cycle - 3) / 2;
-	else
-		cycle = cycle - 1;
-
-	vidc.v_cycle		= cycle;
-	vidc.v_sync_width	= sync_len - 1;
-	vidc.v_border_start	= display_start - 1;
-	vidc.v_display_start	= vidc.v_border_start;
-	vidc.v_display_end	= display_end - 1;
-	vidc.v_border_end	= vidc.v_display_end;
-
-	if (machine_is_a5k())
-		__raw_writeb(vid_ctl, IOEB_VID_CTL);
-
-	if (memcmp(&current_vidc, &vidc, sizeof(vidc))) {
-		current_vidc = vidc;
-
-		vidc_writel(0xe0000000 | vidc_ctl);
-		vidc_writel(0x80000000 | (vidc.h_cycle << 14));
-		vidc_writel(0x84000000 | (vidc.h_sync_width << 14));
-		vidc_writel(0x88000000 | (vidc.h_border_start << 14));
-		vidc_writel(0x8c000000 | (vidc.h_display_start << 14));
-		vidc_writel(0x90000000 | (vidc.h_display_end << 14));
-		vidc_writel(0x94000000 | (vidc.h_border_end << 14));
-		vidc_writel(0x98000000);
-		vidc_writel(0x9c000000 | (vidc.h_interlace << 14));
-		vidc_writel(0xa0000000 | (vidc.v_cycle << 14));
-		vidc_writel(0xa4000000 | (vidc.v_sync_width << 14));
-		vidc_writel(0xa8000000 | (vidc.v_border_start << 14));
-		vidc_writel(0xac000000 | (vidc.v_display_start << 14));
-		vidc_writel(0xb0000000 | (vidc.v_display_end << 14));
-		vidc_writel(0xb4000000 | (vidc.v_border_end << 14));
-		vidc_writel(0xb8000000);
-		vidc_writel(0xbc000000);
-	}
-#ifdef DEBUG_MODE_SELECTION
-	printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres,
-	       var->yres, var->bits_per_pixel);
-	printk(KERN_DEBUG " H-cycle          : %d\n", vidc.h_cycle);
-	printk(KERN_DEBUG " H-sync-width     : %d\n", vidc.h_sync_width);
-	printk(KERN_DEBUG " H-border-start   : %d\n", vidc.h_border_start);
-	printk(KERN_DEBUG " H-display-start  : %d\n", vidc.h_display_start);
-	printk(KERN_DEBUG " H-display-end    : %d\n", vidc.h_display_end);
-	printk(KERN_DEBUG " H-border-end     : %d\n", vidc.h_border_end);
-	printk(KERN_DEBUG " H-interlace      : %d\n", vidc.h_interlace);
-	printk(KERN_DEBUG " V-cycle          : %d\n", vidc.v_cycle);
-	printk(KERN_DEBUG " V-sync-width     : %d\n", vidc.v_sync_width);
-	printk(KERN_DEBUG " V-border-start   : %d\n", vidc.v_border_start);
-	printk(KERN_DEBUG " V-display-start  : %d\n", vidc.v_display_start);
-	printk(KERN_DEBUG " V-display-end    : %d\n", vidc.v_display_end);
-	printk(KERN_DEBUG " V-border-end     : %d\n", vidc.v_border_end);
-	printk(KERN_DEBUG " VIDC Ctrl (E)    : 0x%08X\n", vidc_ctl);
-	printk(KERN_DEBUG " IOEB Ctrl        : 0x%08X\n", vid_ctl);
-#endif
-}
-
-static int
-acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
-		  u_int trans, struct fb_info *info)
-{
-	union palette pal;
-
-	if (regno >= current_par.palette_size)
-		return 1;
-
-	pal.p = 0;
-	pal.vidc.reg   = regno;
-	pal.vidc.red   = red >> 12;
-	pal.vidc.green = green >> 12;
-	pal.vidc.blue  = blue >> 12;
-
-	current_par.palette[regno] = pal;
-
-	vidc_writel(pal.p);
-
-	return 0;
-}
-#endif
-
 #ifdef HAS_VIDC20
 #include <mach/acornfb.h>
 
@@ -634,16 +394,7 @@
 	/* hsync_len must be even */
 	var->hsync_len = (var->hsync_len + 1) & ~1;
 
-#ifdef HAS_VIDC
-	/* left_margin must be odd */
-	if ((var->left_margin & 1) == 0) {
-		var->left_margin -= 1;
-		var->right_margin += 1;
-	}
-
-	/* right_margin must be odd */
-	var->right_margin |= 1;
-#elif defined(HAS_VIDC20)
+#if defined(HAS_VIDC20)
 	/* left_margin must be even */
 	if (var->left_margin & 1) {
 		var->left_margin += 1;
@@ -787,11 +538,7 @@
 		break;
 	case 8:
 		current_par.palette_size = VIDC_PALETTE_SIZE;
-#ifdef HAS_VIDC
-		info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
-#else
 		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
-#endif
 		break;
 #ifdef HAS_VIDC20
 	case 16:
@@ -971,9 +718,6 @@
 #if defined(HAS_VIDC20)
 	fb_info.var.red.length	   = 8;
 	fb_info.var.transp.length  = 4;
-#elif defined(HAS_VIDC)
-	fb_info.var.red.length	   = 4;
-	fb_info.var.transp.length  = 1;
 #endif
 	fb_info.var.green	   = fb_info.var.red;
 	fb_info.var.blue	   = fb_info.var.red;
@@ -1310,14 +1054,6 @@
 		fb_info.fix.smem_start = handle;
 	}
 #endif
-#if defined(HAS_VIDC)
-	/*
-	 * Archimedes/A5000 machines use a fixed address for their
-	 * framebuffers.  Free unused pages
-	 */
-	free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
-#endif
-
 	fb_info.fix.smem_len = size;
 	current_par.palette_size   = VIDC_PALETTE_SIZE;
 
diff --git a/drivers/video/acornfb.h b/drivers/video/acornfb.h
index fb2a7ff..175c8ff 100644
--- a/drivers/video/acornfb.h
+++ b/drivers/video/acornfb.h
@@ -13,10 +13,6 @@
 #include <asm/hardware/iomd.h>
 #define VIDC_PALETTE_SIZE	256
 #define VIDC_NAME		"VIDC20"
-#elif defined(HAS_VIDC)
-#include <asm/hardware/memc.h>
-#define VIDC_PALETTE_SIZE	16
-#define VIDC_NAME		"VIDC"
 #endif
 
 #define EXTEND8(x) ((x)|(x)<<8)
@@ -101,31 +97,6 @@
 	const struct modey_params *modey;
 };
 
-#ifdef HAS_VIDC
-
-#define VID_CTL_VS_NVSYNC	(1 << 3)
-#define VID_CTL_HS_NHSYNC	(1 << 2)
-#define VID_CTL_24MHz		(0)
-#define VID_CTL_25MHz		(1)
-#define VID_CTL_36MHz		(2)
-
-#define VIDC_CTRL_CSYNC		(1 << 7)
-#define VIDC_CTRL_INTERLACE	(1 << 6)
-#define VIDC_CTRL_FIFO_0_4	(0 << 4)
-#define VIDC_CTRL_FIFO_1_5	(1 << 4)
-#define VIDC_CTRL_FIFO_2_6	(2 << 4)
-#define VIDC_CTRL_FIFO_3_7	(3 << 4)
-#define VIDC_CTRL_1BPP		(0 << 2)
-#define VIDC_CTRL_2BPP		(1 << 2)
-#define VIDC_CTRL_4BPP		(2 << 2)
-#define VIDC_CTRL_8BPP		(3 << 2)
-#define VIDC_CTRL_DIV3		(0 << 0)
-#define VIDC_CTRL_DIV2		(1 << 0)
-#define VIDC_CTRL_DIV1_5	(2 << 0)
-#define VIDC_CTRL_DIV1		(3 << 0)
-
-#endif
-
 #ifdef HAS_VIDC20
 /*
  * VIDC20 registers
diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
index 285d552..3c14e43 100644
--- a/drivers/video/logo/logo_linux_clut224.ppm
+++ b/drivers/video/logo/logo_linux_clut224.ppm
@@ -1,883 +1,1604 @@
 P3
+# Standard 224-color Linux logo
 80 80
 255
 [... raw PPM pixel rows elided: the old 80x80 logo's RGB triplets are removed and the new 224-color logo's rows added in their place; the numeric sample data carries no further information ...]
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 250 250 250 221 221 221 218 218 218 101 101 101 2 2 6 14 14 14 
-18 18 18 38 38 38 10 10 10 2 2 6 2 2 6 2 2 6 2 2 6 78 78 78 
-58 58 58 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 15 22 25 36 54 60 
-0 0 0 0 0 0 0 0 0 18 18 18 54 54 54 82 82 82 2 2 6 26 26 26 
-22 22 22 2 2 6 124 127 131 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 250 250 250 238 238 238 198 198 198 6 6 6 38 38 38 
-58 58 58 26 26 26 38 38 38 2 2 6 2 2 6 2 2 6 2 2 6 46 46 46 
-78 78 78 30 30 30 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 21 30 33 
-36 54 60 0 0 0 0 0 0 30 30 30 74 74 74 58 58 58 2 2 6 42 42 42 
-2 2 6 22 22 22 231 231 231 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 246 246 246 46 46 46 38 38 38 
-42 42 42 14 14 14 38 38 38 14 14 14 2 2 6 2 2 6 2 2 6 6 6 6 
-86 86 86 46 46 46 14 14 14 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-36 54 60 0 0 0 0 0 0 42 42 42 90 90 90 18 18 18 18 18 18 26 26 26 
-2 2 6 116 116 116 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 250 250 250 238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 94 94 94 6 6 6 
-2 2 6 2 2 6 10 10 10 34 34 34 2 2 6 2 2 6 2 2 6 2 2 6 
-74 74 74 58 58 58 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 36 54 60 26 26 26 66 66 66 82 82 82 2 2 6 38 38 38 6 6 6 
-14 14 14 210 210 210 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 246 246 246 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 151 151 151 2 2 6 
-2 2 6 2 2 6 2 2 6 46 46 46 2 2 6 2 2 6 2 2 6 2 2 6 
-42 42 42 74 74 74 30 30 30 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-6 6 6 36 54 60 21 30 33 90 90 90 26 26 26 6 6 6 42 42 42 2 2 6 
-74 74 74 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 242 242 242 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 180 180 180 2 2 6 
-2 2 6 2 2 6 2 2 6 46 46 46 2 2 6 2 2 6 2 2 6 2 2 6 
-10 10 10 86 86 86 38 38 38 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-10 10 10 26 26 26 36 54 60 82 82 82 2 2 6 22 22 22 18 18 18 2 2 6 
-151 151 151 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 234 234 234 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 202 202 202 2 2 6 
-2 2 6 2 2 6 2 2 6 38 38 38 2 2 6 2 2 6 2 2 6 2 2 6 
-6 6 6 86 86 86 46 46 46 14 14 14 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-18 18 18 46 46 46 86 86 86 36 54 60 2 2 6 34 34 34 10 10 10 6 6 6 
-210 210 210 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 234 234 234 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 221 221 221 6 6 6 
-2 2 6 2 2 6 6 6 6 30 30 30 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 82 82 82 54 54 54 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 
-26 26 26 66 66 66 62 62 62 2 2 6 2 2 6 38 38 38 10 10 10 26 26 26 
-238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 231 231 231 6 6 6 
-2 2 6 2 2 6 10 10 10 30 30 30 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 66 66 66 58 58 58 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 
-38 38 38 78 78 78 6 6 6 2 2 6 2 2 6 46 46 46 14 14 14 42 42 42 
-246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 234 234 234 10 10 10 
-2 2 6 2 2 6 22 22 22 14 14 14 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 66 66 66 62 62 62 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 18 18 18 
-50 50 50 74 74 74 2 2 6 2 2 6 14 14 14 70 70 70 34 34 34 62 62 62 
-250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 234 234 234 14 14 14 
-2 2 6 2 2 6 30 30 30 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 66 66 66 62 62 62 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 18 18 18 
-54 54 54 62 62 62 2 2 6 2 2 6 2 2 6 30 30 30 46 46 46 70 70 70 
-250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 226 226 226 10 10 10 
-2 2 6 6 6 6 30 30 30 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 66 66 66 58 58 58 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 22 22 22 
-58 58 58 62 62 62 2 2 6 2 2 6 2 2 6 2 2 6 30 30 30 78 78 78 
-250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 202 202 202 2 2 6 
-22 22 22 34 34 34 20 16 6 22 22 22 26 26 26 18 18 18 6 6 6 2 2 6 
-2 2 6 82 82 82 54 54 54 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 26 26 26 
-62 62 62 106 106 106 63 55 20 184 138 11 204 160 10 121 92 8 6 6 6 62 62 62 
-238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 151 151 151 18 18 18 
-14 14 14 2 2 6 2 2 6 2 2 6 6 6 6 18 18 18 66 66 66 38 38 38 
-6 6 6 94 94 94 50 50 50 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 6 6 6 10 10 10 10 10 10 18 18 18 38 38 38 
-78 78 78 138 132 106 216 158 10 242 186 14 246 190 14 246 190 14 156 118 10 10 10 10 
-90 90 90 238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 246 230 190 214 187 87 214 187 87 185 146 40 35 31 12 
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 38 38 38 46 46 46 
-26 26 26 106 106 106 54 54 54 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 6 6 6 14 14 14 22 22 22 30 30 30 38 38 38 50 50 50 70 70 70 
-106 106 106 185 146 40 226 170 11 242 186 14 246 190 14 246 190 14 246 190 14 154 114 10 
-6 6 6 74 74 74 226 226 226 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 231 231 231 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 237 201 50 241 196 14 241 208 19 232 195 16 35 31 12 
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 6 6 6 30 30 30 26 26 26 
-204 160 10 165 152 80 66 66 66 26 26 26 6 6 6 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-6 6 6 18 18 18 38 38 38 58 58 58 78 78 78 86 86 86 101 101 101 124 127 131 
-174 140 55 210 150 10 234 174 13 246 186 14 246 190 14 246 190 14 246 190 14 237 188 10 
-98 70 6 2 2 6 46 46 46 198 198 198 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 234 234 234 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 214 187 87 242 186 14 241 196 14 204 160 10 20 16 6 
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 6 6 6 121 92 8 
-238 202 15 232 195 16 82 82 82 34 34 34 10 10 10 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-14 14 14 38 38 38 70 70 70 148 132 55 185 146 40 200 144 11 197 138 11 197 138 11 
-213 154 11 226 170 11 242 186 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-220 174 15 35 31 12 2 2 6 22 22 22 151 151 151 250 250 250 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 250 250 250 242 242 242 214 187 87 239 182 13 237 188 10 213 154 11 35 31 12 
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 62 42 6 220 174 15 
-237 188 10 237 188 10 113 101 86 42 42 42 14 14 14 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-22 22 22 54 54 54 148 132 55 213 154 11 226 170 11 230 174 11 226 170 11 226 170 11 
-236 178 12 242 186 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-241 196 14 184 138 11 10 10 10 2 2 6 6 6 6 116 116 116 242 242 242 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 231 231 231 198 198 198 213 164 39 236 178 12 236 178 12 210 150 10 137 92 6 
-20 16 6 2 2 6 2 2 6 2 2 6 6 6 6 62 42 6 200 144 11 236 178 12 
-239 182 13 239 182 13 124 112 88 58 58 58 22 22 22 6 6 6 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 
-30 30 30 70 70 70 169 125 40 226 170 11 239 182 13 242 186 14 242 186 14 246 186 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 232 195 16 98 70 6 2 2 6 2 2 6 2 2 6 66 66 66 221 221 221 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 202 202 202 198 198 198 213 164 39 230 174 11 230 174 11 216 158 10 192 133 9 
-163 110 8 120 80 7 98 70 6 120 80 7 167 114 7 197 138 11 226 170 11 239 182 13 
-242 186 14 242 186 14 165 152 80 78 78 78 34 34 34 14 14 14 6 6 6 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-30 30 30 78 78 78 185 146 40 226 170 11 239 182 13 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 241 196 14 204 160 10 20 16 6 2 2 6 2 2 6 2 2 6 38 38 38 
-218 218 218 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-250 250 250 202 202 202 198 198 198 213 164 39 226 170 11 236 178 12 224 166 10 210 150 10 
-200 144 11 197 138 11 192 133 9 197 138 11 210 150 10 226 170 11 242 186 14 246 190 14 
-246 190 14 246 186 14 220 174 15 124 112 88 62 62 62 30 30 30 14 14 14 6 6 6 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 
-30 30 30 78 78 78 174 140 55 224 166 10 239 182 13 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 241 196 14 139 102 15 2 2 6 2 2 6 2 2 6 2 2 6 
-78 78 78 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-250 250 250 214 214 214 198 198 198 185 146 40 219 162 10 236 178 12 234 174 13 224 166 10 
-216 158 10 213 154 11 213 154 11 216 158 10 226 170 11 239 182 13 246 190 14 246 190 14 
-246 190 14 246 190 14 242 186 14 213 164 39 101 101 101 58 58 58 30 30 30 14 14 14 
-6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 
-30 30 30 74 74 74 174 140 55 216 158 10 236 178 12 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 241 196 14 230 187 11 62 42 6 2 2 6 2 2 6 2 2 6 
-22 22 22 238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 226 226 226 187 187 187 169 125 40 216 158 10 236 178 12 239 182 13 236 178 12 
-230 174 11 226 170 11 226 170 11 230 174 11 236 178 12 242 186 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 186 14 239 182 13 213 164 39 106 106 106 66 66 66 34 34 34 
-14 14 14 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-26 26 26 70 70 70 149 139 69 213 154 11 236 178 12 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 241 196 14 190 146 13 20 16 6 2 2 6 2 2 6 
-46 46 46 246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 221 221 221 86 86 86 156 107 11 216 158 10 236 178 12 242 186 14 246 186 14 
-242 186 14 239 182 13 239 182 13 242 186 14 242 186 14 246 186 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 220 174 15 149 139 69 66 66 66 
-30 30 30 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-26 26 26 70 70 70 149 139 69 210 150 10 236 178 12 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 232 195 16 121 92 8 34 34 34 106 106 106 
-221 221 221 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-242 242 242 82 82 82 20 16 6 163 110 8 216 158 10 236 178 12 242 186 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 149 139 69 
-46 46 46 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 
-30 30 30 78 78 78 149 139 69 210 150 10 236 178 12 246 186 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 241 196 14 220 174 15 198 179 130 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 218 218 218 
-58 58 58 2 2 6 20 16 6 167 114 7 216 158 10 236 178 12 246 186 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 242 186 14 185 146 40 
-54 54 54 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 14 14 
-38 38 38 86 86 86 169 125 40 213 154 11 236 178 12 246 186 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 232 195 16 190 146 13 214 214 214 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 250 250 250 170 170 170 26 26 26 
-2 2 6 2 2 6 35 31 12 163 110 8 219 162 10 239 182 13 246 186 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 236 178 12 224 166 10 149 139 69 
-46 46 46 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 18 18 18 
-50 50 50 113 101 86 192 133 9 224 166 10 242 186 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 230 187 11 204 160 10 133 118 54 
-226 226 226 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 
-253 253 253 253 253 253 253 253 253 253 253 253 198 198 198 66 66 66 2 2 6 2 2 6 
-2 2 6 2 2 6 62 42 6 156 107 11 219 162 10 239 182 13 246 186 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 242 186 14 234 174 13 213 154 11 148 132 55 66 66 66 
-30 30 30 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 22 22 22 
-58 58 58 148 132 55 206 145 10 234 174 13 242 186 14 246 186 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 236 178 12 204 160 10 163 110 8 
-62 42 6 124 131 137 218 218 218 250 250 250 253 253 253 253 253 253 253 253 253 250 250 250 
-242 242 242 210 210 210 151 151 151 66 66 66 6 6 6 2 2 6 2 2 6 2 2 6 
-2 2 6 2 2 6 62 42 6 163 110 8 216 158 10 236 178 12 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 239 182 13 230 174 11 216 158 10 185 146 40 124 112 88 70 70 70 38 38 38 
-18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 22 22 22 
-62 62 62 169 125 40 206 145 10 224 166 10 236 178 12 239 182 13 242 186 14 242 186 14 
-246 186 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 236 178 12 216 158 10 171 120 8 
-85 57 6 2 2 6 6 6 6 30 30 30 54 54 54 62 62 62 50 50 50 38 38 38 
-14 14 14 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 6 6 6 85 57 6 167 114 7 213 154 11 236 178 12 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 239 182 13 239 182 13 
-230 174 11 210 150 10 174 140 55 124 112 88 82 82 82 54 54 54 34 34 34 18 18 18 
-6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 18 18 18 
-50 50 50 169 125 40 192 133 9 200 144 11 216 158 10 219 162 10 224 166 10 226 170 11 
-230 174 11 236 178 12 239 182 13 239 182 13 242 186 14 246 186 14 246 190 14 246 190 14 
-246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 230 174 11 210 150 10 163 110 8 
-104 69 6 10 10 10 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 6 6 6 85 57 6 167 114 7 206 145 10 230 174 11 242 186 14 246 190 14 
-246 190 14 246 190 14 246 186 14 242 186 14 239 182 13 230 174 11 224 166 10 213 154 11 
-169 125 40 124 112 88 86 86 86 58 58 58 38 38 38 22 22 22 10 10 10 6 6 6 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 14 14 
-34 34 34 70 70 70 133 118 54 169 125 40 167 114 7 180 123 7 192 133 9 197 138 11 
-200 144 11 206 145 10 213 154 11 219 162 10 224 166 10 230 174 11 239 182 13 242 186 14 
-246 186 14 246 186 14 246 186 14 246 186 14 239 182 13 216 158 10 184 138 11 152 99 6 
-104 69 6 20 16 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 
-2 2 6 6 6 6 85 57 6 152 99 6 192 133 9 219 162 10 236 178 12 239 182 13 
-246 186 14 242 186 14 239 182 13 236 178 12 224 166 10 206 145 10 192 133 9 148 132 55 
-94 94 94 62 62 62 42 42 42 22 22 22 14 14 14 6 6 6 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-18 18 18 34 34 34 58 58 58 78 78 78 101 98 89 124 112 88 133 118 54 156 107 11 
-163 110 8 167 114 7 171 120 8 180 123 7 184 138 11 197 138 11 210 150 10 219 162 10 
-226 170 11 236 178 12 236 178 12 234 174 13 219 162 10 197 138 11 163 110 8 134 84 6 
-85 57 6 10 10 10 2 2 6 2 2 6 18 18 18 38 38 38 38 38 38 38 38 38 
-38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 26 26 26 2 2 6 
-2 2 6 6 6 6 62 42 6 137 92 6 171 120 8 200 144 11 219 162 10 230 174 11 
-234 174 13 230 174 11 219 162 10 210 150 10 192 133 9 163 110 8 124 112 88 82 82 82 
-50 50 50 30 30 30 14 14 14 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-6 6 6 14 14 14 22 22 22 34 34 34 42 42 42 58 58 58 74 74 74 86 86 86 
-101 98 89 113 101 86 133 118 54 121 92 8 137 92 6 152 99 6 163 110 8 180 123 7 
-184 138 11 197 138 11 206 145 10 200 144 11 180 123 7 156 107 11 134 84 6 104 69 6 
-62 42 6 54 54 54 106 106 106 101 98 89 86 86 86 82 82 82 78 78 78 78 78 78 
-78 78 78 78 78 78 78 78 78 78 78 78 78 78 78 82 82 82 86 86 86 94 94 94 
-106 106 106 101 101 101 90 61 47 120 80 7 156 107 11 180 123 7 192 133 9 200 144 11 
-206 145 10 200 144 11 192 133 9 171 120 8 139 102 15 113 101 86 70 70 70 42 42 42 
-22 22 22 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 6 6 6 10 10 10 14 14 14 22 22 22 30 30 30 38 38 38 
-50 50 50 62 62 62 74 74 74 90 90 90 101 98 89 113 101 86 121 92 8 120 80 7 
-137 92 6 152 99 6 152 99 6 152 99 6 134 84 6 120 80 7 98 70 6 88 55 22 
-101 98 89 82 82 82 58 58 58 46 46 46 38 38 38 34 34 34 34 34 34 34 34 34 
-34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 38 38 38 42 42 42 
-54 54 54 82 82 82 94 86 71 85 57 6 134 84 6 156 107 11 167 114 7 171 120 8 
-171 120 8 167 114 7 152 99 6 121 92 8 101 98 89 62 62 62 34 34 34 18 18 18 
-6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 6 6 6 10 10 10 
-18 18 18 22 22 22 30 30 30 42 42 42 50 50 50 66 66 66 86 86 86 101 98 89 
-94 86 71 98 70 6 104 69 6 104 69 6 104 69 6 85 57 6 88 55 22 90 90 90 
-62 62 62 38 38 38 22 22 22 14 14 14 10 10 10 10 10 10 10 10 10 10 10 10 
-10 10 10 10 10 10 6 6 6 10 10 10 10 10 10 10 10 10 10 10 10 14 14 14 
-22 22 22 42 42 42 70 70 70 94 86 71 85 57 6 104 69 6 120 80 7 137 92 6 
-134 84 6 120 80 7 94 86 71 86 86 86 58 58 58 30 30 30 14 14 14 6 6 6 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 6 6 6 10 10 10 14 14 14 18 18 18 26 26 26 38 38 38 54 54 54 
-70 70 70 86 86 86 94 86 71 94 86 71 94 86 71 86 86 86 74 74 74 50 50 50 
-30 30 30 14 14 14 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-6 6 6 18 18 18 34 34 34 58 58 58 82 82 82 94 86 71 94 86 71 94 86 71 
-94 86 71 94 86 71 74 74 74 50 50 50 26 26 26 14 14 14 6 6 6 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 6 6 6 14 14 14 18 18 18 
-30 30 30 38 38 38 46 46 46 54 54 54 50 50 50 42 42 42 30 30 30 18 18 18 
-10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 6 6 6 14 14 14 26 26 26 38 38 38 50 50 50 58 58 58 58 58 58 
-54 54 54 42 42 42 30 30 30 18 18 18 10 10 10 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 
-6 6 6 10 10 10 14 14 14 18 18 18 18 18 18 14 14 14 10 10 10 6 6 6 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 6 6 6 14 14 14 18 18 18 22 22 22 22 22 22 
-18 18 18 14 14 14 10 10 10 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
-
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  6   6   6   6   6   6  10  10  10  10  10  10
+ 10  10  10   6   6   6   6   6   6   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   6   6   6  10  10  10  14  14  14
+ 22  22  22  26  26  26  30  30  30  34  34  34
+ 30  30  30  30  30  30  26  26  26  18  18  18
+ 14  14  14  10  10  10   6   6   6   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  6   6   6  14  14  14  26  26  26  42  42  42
+ 54  54  54  66  66  66  78  78  78  78  78  78
+ 78  78  78  74  74  74  66  66  66  54  54  54
+ 42  42  42  26  26  26  18  18  18  10  10  10
+  6   6   6   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  10  10  10
+ 22  22  22  42  42  42  66  66  66  86  86  86
+ 66  66  66  38  38  38  38  38  38  22  22  22
+ 26  26  26  34  34  34  54  54  54  66  66  66
+ 86  86  86  70  70  70  46  46  46  26  26  26
+ 14  14  14   6   6   6   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0  10  10  10  26  26  26
+ 50  50  50  82  82  82  58  58  58   6   6   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  6   6   6  54  54  54  86  86  86  66  66  66
+ 38  38  38  18  18  18   6   6   6   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   6   6   6  22  22  22  50  50  50
+ 78  78  78  34  34  34   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   6   6   6  70  70  70
+ 78  78  78  46  46  46  22  22  22   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  6   6   6  18  18  18  42  42  42  82  82  82
+ 26  26  26   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  14  14  14
+ 46  46  46  34  34  34   6   6   6   2   2   6
+ 42  42  42  78  78  78  42  42  42  18  18  18
+  6   6   6   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   0   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+ 10  10  10  30  30  30  66  66  66  58  58  58
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  26  26  26
+ 86  86  86 101 101 101  46  46  46  10  10  10
+  2   2   6  58  58  58  70  70  70  34  34  34
+ 10  10  10   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+ 14  14  14  42  42  42  86  86  86  10  10  10
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  30  30  30
+ 94  94  94  94  94  94  58  58  58  26  26  26
+  2   2   6   6   6   6  78  78  78  54  54  54
+ 22  22  22   6   6   6   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   6   6   6
+ 22  22  22  62  62  62  62  62  62   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  26  26  26
+ 54  54  54  38  38  38  18  18  18  10  10  10
+  2   2   6   2   2   6  34  34  34  82  82  82
+ 38  38  38  14  14  14   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   6   6   6
+ 30  30  30  78  78  78  30  30  30   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  10  10  10
+ 10  10  10   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  78  78  78
+ 50  50  50  18  18  18   6   6   6   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  10  10  10
+ 38  38  38  86  86  86  14  14  14   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  54  54  54
+ 66  66  66  26  26  26   6   6   6   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 42  42  42  82  82  82   2   2   6   2   2   6
+  2   2   6   6   6   6  10  10  10   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6   6   6   6
+ 14  14  14  10  10  10   2   2   6   2   2   6
+  2   2   6   2   2   6   2   2   6  18  18  18
+ 82  82  82  34  34  34  10  10  10   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 46  46  46  86  86  86   2   2   6   2   2   6
+  6   6   6   6   6   6  22  22  22  34  34  34
+  6   6   6   2   2   6   2   2   6   2   2   6
+  2   2   6   2   2   6  18  18  18  34  34  34
+ 10  10  10  50  50  50  22  22  22   2   2   6
+  2   2   6   2   2   6   2   2   6  10  10  10
+ 86  86  86  42  42  42  14  14  14   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   1   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 46  46  46  86  86  86   2   2   6   2   2   6
+ 38  38  38 116 116 116  94  94  94  22  22  22
+ 22  22  22   2   2   6   2   2   6   2   2   6
+ 14  14  14  86  86  86 138 138 138 162 162 162
+154 154 154  38  38  38  26  26  26   6   6   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 86  86  86  46  46  46  14  14  14   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 46  46  46  86  86  86   2   2   6  14  14  14
+134 134 134 198 198 198 195 195 195 116 116 116
+ 10  10  10   2   2   6   2   2   6   6   6   6
+101  98  89 187 187 187 210 210 210 218 218 218
+214 214 214 134 134 134  14  14  14   6   6   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 86  86  86  50  50  50  18  18  18   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   1   0   0   0
+  0   0   1   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 46  46  46  86  86  86   2   2   6  54  54  54
+218 218 218 195 195 195 226 226 226 246 246 246
+ 58  58  58   2   2   6   2   2   6  30  30  30
+210 210 210 253 253 253 174 174 174 123 123 123
+221 221 221 234 234 234  74  74  74   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 70  70  70  58  58  58  22  22  22   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 46  46  46  82  82  82   2   2   6 106 106 106
+170 170 170  26  26  26  86  86  86 226 226 226
+123 123 123  10  10  10  14  14  14  46  46  46
+231 231 231 190 190 190   6   6   6  70  70  70
+ 90  90  90 238 238 238 158 158 158   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 70  70  70  58  58  58  22  22  22   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   1   0   0   0
+  0   0   1   0   0   1   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 42  42  42  86  86  86   6   6   6 116 116 116
+106 106 106   6   6   6  70  70  70 149 149 149
+128 128 128  18  18  18  38  38  38  54  54  54
+221 221 221 106 106 106   2   2   6  14  14  14
+ 46  46  46 190 190 190 198 198 198   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 74  74  74  62  62  62  22  22  22   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   1   0   0   0
+  0   0   1   0   0   0   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  14  14  14
+ 42  42  42  94  94  94  14  14  14 101 101 101
+128 128 128   2   2   6  18  18  18 116 116 116
+118  98  46 121  92   8 121  92   8  98  78  10
+162 162 162 106 106 106   2   2   6   2   2   6
+  2   2   6 195 195 195 195 195 195   6   6   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 74  74  74  62  62  62  22  22  22   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   1   0   0   1
+  0   0   1   0   0   0   0   0   1   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  10  10  10
+ 38  38  38  90  90  90  14  14  14  58  58  58
+210 210 210  26  26  26  54  38   6 154 114  10
+226 170  11 236 186  11 225 175  15 184 144  12
+215 174  15 175 146  61  37  26   9   2   2   6
+ 70  70  70 246 246 246 138 138 138   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 70  70  70  66  66  66  26  26  26   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0   0   0   0
+  0   0   0   0   0   0   0   0   0  10  10  10
+ 38  38  38  86  86  86  14  14  14  10  10  10
+195 195 195 188 164 115 192 133   9 225 175  15
+239 182  13 234 190  10 232 195  16 232 200  30
+245 207  45 241 208  19 232 195  16 184 144  12
+218 194 134 211 206 186  42  42  42   2   2   6
+  2   2   6   2   2   6   2   2   6   2   2   6
+ 50  50  50  74  74  74  30  30  30   6   6   6
+  0   0   0   0   0   0   0   0   0   0   0   0
+  [... remaining rows of 12-column RGB triplet pixel data (logo pixmap) omitted ...]
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index dbfe2c1..b269abd 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -952,7 +952,7 @@
 	.fb_compat_ioctl = ps3fb_ioctl
 };
 
-static struct fb_fix_screeninfo ps3fb_fix __initdata = {
+static struct fb_fix_screeninfo ps3fb_fix = {
 	.id =		DEVICE_NAME,
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_TRUECOLOR,
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 47e12cf..15c7251 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -152,8 +152,6 @@
 
 	clk_disable_unprepare(mdev->clk);
 
-	platform_set_drvdata(pdev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 22013ca..c7c64f1 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -234,9 +234,11 @@
 {
 	long tmp;
 	struct w1_master *md = dev_to_w1_master(dev);
+	int ret;
 
-	if (strict_strtol(buf, 0, &tmp) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 0, &tmp);
+	if (ret)
+		return ret;
 
 	mutex_lock(&md->mutex);
 	md->search_count = tmp;
@@ -266,9 +268,11 @@
 {
 	long tmp;
 	struct w1_master *md = dev_to_w1_master(dev);
+	int ret;
 
-	if (strict_strtol(buf, 0, &tmp) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 0, &tmp);
+	if (ret)
+		return ret;
 
 	mutex_lock(&md->mutex);
 	md->enable_pullup = tmp;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index de7e4f4..5be5e3d 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -162,7 +162,8 @@
 #define HPWDT_ARCH	32
 
 asm(".text                          \n\t"
-    ".align 4                       \n"
+    ".align 4                       \n\t"
+    ".globl asminline_call	    \n"
     "asminline_call:                \n\t"
     "pushl       %ebp               \n\t"
     "movl        %esp, %ebp         \n\t"
@@ -352,7 +353,8 @@
 #define HPWDT_ARCH	64
 
 asm(".text                      \n\t"
-    ".align 4                   \n"
+    ".align 4                   \n\t"
+    ".globl asminline_call	\n"
     "asminline_call:            \n\t"
     "pushq      %rbp            \n\t"
     "movq       %rsp, %rbp      \n\t"
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index d384a8b..aa5ecf4 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -183,7 +183,7 @@
 	else
 		flock.length = fl->fl_end - fl->fl_start + 1;
 	flock.proc_id = fl->fl_pid;
-	flock.client_id = utsname()->nodename;
+	flock.client_id = fid->clnt->name;
 	if (IS_SETLKW(cmd))
 		flock.flags = P9_LOCK_FLAGS_BLOCK;
 
@@ -260,7 +260,7 @@
 	else
 		glock.length = fl->fl_end - fl->fl_start + 1;
 	glock.proc_id = fl->fl_pid;
-	glock.client_id = utsname()->nodename;
+	glock.client_id = fid->clnt->name;
 
 	res = p9_client_getlock_dotl(fid, &glock);
 	if (res < 0)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 25b018e..94de6d1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -146,7 +146,7 @@
 		char type = 0, ext[32];
 		int major = -1, minor = -1;
 
-		strncpy(ext, stat->extension, sizeof(ext));
+		strlcpy(ext, stat->extension, sizeof(ext));
 		sscanf(ext, "%c %u %u", &type, &major, &minor);
 		switch (type) {
 		case 'c':
@@ -1186,7 +1186,7 @@
 			 * this even with .u extension. So check
 			 * for non NULL stat->extension
 			 */
-			strncpy(ext, stat->extension, sizeof(ext));
+			strlcpy(ext, stat->extension, sizeof(ext));
 			/* HARDLINKCOUNT %u */
 			sscanf(ext, "%13s %u", tag_name, &i_nlink);
 			if (!strncmp(tag_name, "HARDLINKCOUNT", 13))
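
The reason for the switch: strncpy() leaves the destination without a
NUL terminator whenever the source is at least sizeof(ext) bytes long,
and the sscanf() that follows would then read past the array. strlcpy()
always terminates, truncating if necessary. A minimal userspace sketch;
because strlcpy() is a BSD/kernel API that glibc does not provide, an
equivalent is defined locally:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = (len >= size) ? size - 1 : len;
        memcpy(dst, src, n);
        dst[n] = '\0';              /* always NUL-terminated */
    }
    return len;                     /* length the caller asked to copy */
}

int main(void)
{
    char long_src[64];
    char ext[32];

    memset(long_src, 'x', sizeof(long_src) - 1);
    long_src[sizeof(long_src) - 1] = '\0';

    strncpy(ext, long_src, sizeof(ext));
    /* ext now holds 32 'x' bytes and no NUL: not a valid C string,
     * so any sscanf(ext, ...) would read beyond the buffer. */

    my_strlcpy(ext, long_src, sizeof(ext));
    printf("len=%zu\n", strlen(ext));   /* 31: safely terminated */
    return 0;
}
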
diff --git a/fs/affs/file.c b/fs/affs/file.c
index af3261b..776e393 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -836,7 +836,7 @@
 		struct address_space *mapping = inode->i_mapping;
 		struct page *page;
 		void *fsdata;
-		u32 size = inode->i_size;
+		loff_t size = inode->i_size;
 		int res;
 
 		res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
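
The type change matters because i_size is a 64-bit loff_t: storing it in
a u32 truncates modulo 2^32, so for files of 4 GiB and larger the
write_begin() call above would have been issued at the wrong offset. A
two-line demonstration of the wraparound (int64_t standing in for
loff_t):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t i_size = 5368709120LL;  /* 5 GiB */
    uint32_t truncated = i_size;    /* silently wraps modulo 2^32 */

    printf("64-bit: %lld  u32: %u\n", (long long)i_size, truncated);
    /* prints: 64-bit: 5368709120  u32: 1073741824 */
    return 0;
}
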
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0b74d31..646337d 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -751,10 +751,6 @@
 	_enter("{%x:%u},{%s},%ho",
 	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
 
-	ret = -ENAMETOOLONG;
-	if (dentry->d_name.len >= AFSNAMEMAX)
-		goto error;
-
 	key = afs_request_key(dvnode->volume->cell);
 	if (IS_ERR(key)) {
 		ret = PTR_ERR(key);
@@ -816,10 +812,6 @@
 	_enter("{%x:%u},{%s}",
 	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
 
-	ret = -ENAMETOOLONG;
-	if (dentry->d_name.len >= AFSNAMEMAX)
-		goto error;
-
 	key = afs_request_key(dvnode->volume->cell);
 	if (IS_ERR(key)) {
 		ret = PTR_ERR(key);
@@ -936,10 +928,6 @@
 	_enter("{%x:%u},{%s},%ho,",
 	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
 
-	ret = -ENAMETOOLONG;
-	if (dentry->d_name.len >= AFSNAMEMAX)
-		goto error;
-
 	key = afs_request_key(dvnode->volume->cell);
 	if (IS_ERR(key)) {
 		ret = PTR_ERR(key);
@@ -1005,10 +993,6 @@
 	       dvnode->fid.vid, dvnode->fid.vnode,
 	       dentry->d_name.name);
 
-	ret = -ENAMETOOLONG;
-	if (dentry->d_name.len >= AFSNAMEMAX)
-		goto error;
-
 	key = afs_request_key(dvnode->volume->cell);
 	if (IS_ERR(key)) {
 		ret = PTR_ERR(key);
@@ -1053,10 +1037,6 @@
 	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name,
 	       content);
 
-	ret = -ENAMETOOLONG;
-	if (dentry->d_name.len >= AFSNAMEMAX)
-		goto error;
-
 	ret = -EINVAL;
 	if (strlen(content) >= AFSPATHMAX)
 		goto error;
@@ -1127,10 +1107,6 @@
 	       new_dvnode->fid.vid, new_dvnode->fid.vnode,
 	       new_dentry->d_name.name);
 
-	ret = -ENAMETOOLONG;
-	if (new_dentry->d_name.len >= AFSNAMEMAX)
-		goto error;
-
 	key = afs_request_key(orig_dvnode->volume->cell);
 	if (IS_ERR(key)) {
 		ret = PTR_ERR(key);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 743c7c2..0f00da3 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -183,13 +183,14 @@
 	return 0;
 }
 
+/* Find the topmost mount satisfying test() */
 static int find_autofs_mount(const char *pathname,
 			     struct path *res,
 			     int test(struct path *path, void *data),
 			     void *data)
 {
 	struct path path;
-	int err = kern_path(pathname, 0, &path);
+	int err = kern_path_mountpoint(AT_FDCWD, pathname, &path, 0);
 	if (err)
 		return err;
 	err = -ENOENT;
@@ -197,10 +198,9 @@
 		if (path.dentry->d_sb->s_magic == AUTOFS_SUPER_MAGIC) {
 			if (test(&path, data)) {
 				path_get(&path);
-				if (!err) /* already found some */
-					path_put(res);
 				*res = path;
 				err = 0;
+				break;
 			}
 		}
 		if (!follow_up(&path))
@@ -486,12 +486,11 @@
  * mount if there is one or 0 if it isn't a mountpoint.
  *
  * If we aren't supplied with a file descriptor then we
- * lookup the nameidata of the path and check if it is the
- * root of a mount. If a type is given we are looking for
- * a particular autofs mount and if we don't find a match
- * we return fail. If the located nameidata path is the
- * root of a mount we return 1 along with the super magic
- * of the mount or 0 otherwise.
+ * lookup the path and check if it is the root of a mount.
+ * If a type is given we are looking for a particular autofs
+ * mount and if we don't find a match we return fail. If the
+ * located path is the root of a mount we return 1 along with
+ * the super magic of the mount or 0 otherwise.
  *
 * In both cases the device number (as returned by
  * new_encode_dev()) is also returned.
@@ -519,9 +518,11 @@
 
 	if (!fp || param->ioctlfd == -1) {
 		if (autofs_type_any(type))
-			err = kern_path(name, LOOKUP_FOLLOW, &path);
+			err = kern_path_mountpoint(AT_FDCWD,
+						   name, &path, LOOKUP_FOLLOW);
 		else
-			err = find_autofs_mount(name, &path, test_by_type, &type);
+			err = find_autofs_mount(name, &path,
+						test_by_type, &type);
 		if (err)
 			goto out;
 		devid = new_encode_dev(path.dentry->d_sb->s_dev);
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 8fb42916..6025084 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -716,14 +716,15 @@
 		return 0;
 
 	bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
-
-	bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size);
-	if (!bs->bvec_integrity_pool)
-		return -1;
-
 	if (!bs->bio_integrity_pool)
 		return -1;
 
+	bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size);
+	if (!bs->bvec_integrity_pool) {
+		mempool_destroy(bs->bio_integrity_pool);
+		return -1;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(bioset_integrity_create);
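
Two error-path bugs fall to one reordering: bio_integrity_pool is now
checked before anything depends on it, and it is destroyed again when
creating bvec_integrity_pool fails, where it previously leaked. The
general cleanup shape, sketched in plain C with malloc()/free() standing
in for the mempool calls:

#include <stdlib.h>

struct bioset_like {
    void *integrity_pool;       /* stands in for bio_integrity_pool */
    void *bvec_pool;            /* stands in for bvec_integrity_pool */
};

static int integrity_create(struct bioset_like *bs)
{
    bs->integrity_pool = malloc(64);
    if (!bs->integrity_pool)    /* check the first allocation first */
        return -1;

    bs->bvec_pool = malloc(64);
    if (!bs->bvec_pool) {
        free(bs->integrity_pool);   /* unwind, don't leak */
        bs->integrity_pool = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct bioset_like bs;

    return integrity_create(&bs) ? 1 : 0;
}
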
diff --git a/fs/coredump.c b/fs/coredump.c
index 72f816d..9bdeca1 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -190,6 +190,11 @@
 				err = cn_printf(cn, "%d",
 					      task_tgid_vnr(current));
 				break;
+			/* global pid */
+			case 'P':
+				err = cn_printf(cn, "%d",
+					      task_tgid_nr(current));
+				break;
 			/* uid */
 			case 'u':
 				err = cn_printf(cn, "%d", cred->uid);
diff --git a/fs/dcache.c b/fs/dcache.c
index ca8e9cd..4d9df3c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -88,6 +88,35 @@
 
 static struct kmem_cache *dentry_cache __read_mostly;
 
+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * @lock: sequence lock
+ * @seq:  sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a reader (even) or writer (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+	if (!(*seq & 1))	/* Even */
+		*seq = read_seqbegin(lock);
+	else			/* Odd */
+		write_seqlock(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+	return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+	if (seq & 1)
+		write_sequnlock(lock);
+}
+
 /*
  * This is the single most critical data structure when it comes
  * to the dcache: the hashtable for lookups. Somebody should try
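
These three helpers carry the rest of this patch: the first pass through
a walk runs locklessly under the seqlock's read sequence (seq even), and
only if that pass raced with a writer does the caller set seq odd and
repeat with the write lock held, which guarantees the second pass cannot
be invalidated. A toy userspace model of the protocol, using pthreads
and C11 atomics; this is deliberately not the kernel seqlock
implementation, just its control flow:

#include <pthread.h>
#include <stdatomic.h>

/* Toy seqlock: sequence is even while idle, odd while a writer runs. */
struct toy_seqlock {
    atomic_uint sequence;
    pthread_mutex_t lock;
};

static unsigned toy_read_seqbegin(struct toy_seqlock *sl)
{
    unsigned s;

    while ((s = atomic_load(&sl->sequence)) & 1)
        ;                       /* writer active: wait for even */
    return s;
}

static int toy_read_seqretry(struct toy_seqlock *sl, unsigned s)
{
    return atomic_load(&sl->sequence) != s;
}

static void toy_write_seqlock(struct toy_seqlock *sl)
{
    pthread_mutex_lock(&sl->lock);
    atomic_fetch_add(&sl->sequence, 1);     /* odd: readers will retry */
}

static void toy_write_sequnlock(struct toy_seqlock *sl)
{
    atomic_fetch_add(&sl->sequence, 1);     /* even again */
    pthread_mutex_unlock(&sl->lock);
}

/* The three helpers, restated for the toy lock. */
static void toy_seqbegin_or_lock(struct toy_seqlock *sl, int *seq)
{
    if (!(*seq & 1))            /* even: optimistic lockless pass */
        *seq = toy_read_seqbegin(sl);
    else                        /* odd: we lost the race, lock for real */
        toy_write_seqlock(sl);
}

static int toy_need_seqretry(struct toy_seqlock *sl, int seq)
{
    return !(seq & 1) && toy_read_seqretry(sl, seq);
}

static void toy_done_seqretry(struct toy_seqlock *sl, int seq)
{
    if (seq & 1)
        toy_write_sequnlock(sl);
}

int main(void)
{
    static struct toy_seqlock sl = {
        .sequence = 0, .lock = PTHREAD_MUTEX_INITIALIZER };
    int seq = 0;

retry:
    toy_seqbegin_or_lock(&sl, &seq);
    /* ... lockless read of shared state would happen here ... */
    if (toy_need_seqretry(&sl, seq)) {
        seq = 1;                /* second pass takes the write lock */
        goto retry;
    }
    toy_done_seqretry(&sl, seq);
    return 0;
}
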
@@ -1012,7 +1041,7 @@
  * the parenthood after dropping the lock and check
  * that the sequence number still matches.
  */
-static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
+static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
 {
 	struct dentry *new = old->d_parent;
 
@@ -1026,7 +1055,7 @@
 	 */
 	if (new != old->d_parent ||
 		 (old->d_flags & DCACHE_DENTRY_KILLED) ||
-		 (!locked && read_seqretry(&rename_lock, seq))) {
+		 need_seqretry(&rename_lock, seq)) {
 		spin_unlock(&new->d_lock);
 		new = NULL;
 	}
@@ -1063,13 +1092,12 @@
 {
 	struct dentry *this_parent;
 	struct list_head *next;
-	unsigned seq;
-	int locked = 0;
+	unsigned seq = 0;
 	enum d_walk_ret ret;
 	bool retry = true;
 
-	seq = read_seqbegin(&rename_lock);
 again:
+	read_seqbegin_or_lock(&rename_lock, &seq);
 	this_parent = parent;
 	spin_lock(&this_parent->d_lock);
 
@@ -1123,13 +1151,13 @@
 	 */
 	if (this_parent != parent) {
 		struct dentry *child = this_parent;
-		this_parent = try_to_ascend(this_parent, locked, seq);
+		this_parent = try_to_ascend(this_parent, seq);
 		if (!this_parent)
 			goto rename_retry;
 		next = child->d_u.d_child.next;
 		goto resume;
 	}
-	if (!locked && read_seqretry(&rename_lock, seq)) {
+	if (need_seqretry(&rename_lock, seq)) {
 		spin_unlock(&this_parent->d_lock);
 		goto rename_retry;
 	}
@@ -1138,17 +1166,13 @@
 
 out_unlock:
 	spin_unlock(&this_parent->d_lock);
-	if (locked)
-		write_sequnlock(&rename_lock);
+	done_seqretry(&rename_lock, seq);
 	return;
 
 rename_retry:
 	if (!retry)
 		return;
-	if (locked)
-		goto again;
-	locked = 1;
-	write_seqlock(&rename_lock);
+	seq = 1;
 	goto again;
 }
 
@@ -2647,9 +2671,39 @@
 	return 0;
 }
 
+/**
+ * prepend_name - prepend a pathname in front of current buffer pointer
+ * @buffer: buffer pointer
+ * @buflen: allocated length of the buffer
+ * @name:   name string and length qstr structure
+ *
+ * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
+ * make sure that either the old or the new name pointer and length are
+ * fetched. However, the pointer and the length may still not match. Since
+ * the length cannot be trusted, the name is copied byte by byte until
+ * either the length is reached or a null byte is found. A "/" is also
+ * prepended at the beginning of the name. The sequence number check at
+ * the caller will retry when a d_move() does happen, so any garbage in
+ * the buffer due to a mismatched pointer and length will be discarded.
+ */
 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 {
-	return prepend(buffer, buflen, name->name, name->len);
+	const char *dname = ACCESS_ONCE(name->name);
+	u32 dlen = ACCESS_ONCE(name->len);
+	char *p;
+
+	if (*buflen < dlen + 1)
+		return -ENAMETOOLONG;
+	*buflen -= dlen + 1;
+	p = *buffer -= dlen + 1;
+	*p++ = '/';
+	while (dlen--) {
+		char c = *dname++;
+		if (!c)
+			break;
+		*p++ = c;
+	}
+	return 0;
 }
 
 /**
@@ -2659,7 +2713,14 @@
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
- * Caller holds the rename_lock.
+ * The function tries to write out the pathname without taking any lock other
+ * than the RCU read lock to make sure that dentries won't go away. It only
+ * checks the sequence number of the global rename_lock as any change in the
+ * dentry's d_seq will be preceded by changes in the rename_lock sequence
+ * number. If the sequence number has changed, it will restart the whole
+ * pathname back-tracing sequence again: the first pass is done locklessly,
+ * and the retry falls back to taking the rename_lock.
  */
 static int prepend_path(const struct path *path,
 			const struct path *root,
@@ -2668,54 +2729,66 @@
 	struct dentry *dentry = path->dentry;
 	struct vfsmount *vfsmnt = path->mnt;
 	struct mount *mnt = real_mount(vfsmnt);
-	bool slash = false;
 	int error = 0;
+	unsigned seq = 0;
+	char *bptr;
+	int blen;
 
+	rcu_read_lock();
+restart:
+	bptr = *buffer;
+	blen = *buflen;
+	read_seqbegin_or_lock(&rename_lock, &seq);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
 
 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
 			/* Global root? */
-			if (!mnt_has_parent(mnt))
-				goto global_root;
-			dentry = mnt->mnt_mountpoint;
-			mnt = mnt->mnt_parent;
-			vfsmnt = &mnt->mnt;
-			continue;
+			if (mnt_has_parent(mnt)) {
+				dentry = mnt->mnt_mountpoint;
+				mnt = mnt->mnt_parent;
+				vfsmnt = &mnt->mnt;
+				continue;
+			}
+			/*
+			 * Filesystems needing to implement special "root names"
+			 * should do so with ->d_dname()
+			 */
+			if (IS_ROOT(dentry) &&
+			   (dentry->d_name.len != 1 ||
+			    dentry->d_name.name[0] != '/')) {
+				WARN(1, "Root dentry has weird name <%.*s>\n",
+				     (int) dentry->d_name.len,
+				     dentry->d_name.name);
+			}
+			if (!error)
+				error = is_mounted(vfsmnt) ? 1 : 2;
+			break;
 		}
 		parent = dentry->d_parent;
 		prefetch(parent);
-		spin_lock(&dentry->d_lock);
-		error = prepend_name(buffer, buflen, &dentry->d_name);
-		spin_unlock(&dentry->d_lock);
-		if (!error)
-			error = prepend(buffer, buflen, "/", 1);
+		error = prepend_name(&bptr, &blen, &dentry->d_name);
 		if (error)
 			break;
 
-		slash = true;
 		dentry = parent;
 	}
-
-	if (!error && !slash)
-		error = prepend(buffer, buflen, "/", 1);
-
-	return error;
-
-global_root:
-	/*
-	 * Filesystems needing to implement special "root names"
-	 * should do so with ->d_dname()
-	 */
-	if (IS_ROOT(dentry) &&
-	    (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
-		WARN(1, "Root dentry has weird name <%.*s>\n",
-		     (int) dentry->d_name.len, dentry->d_name.name);
+	if (!(seq & 1))
+		rcu_read_unlock();
+	if (need_seqretry(&rename_lock, seq)) {
+		seq = 1;
+		goto restart;
 	}
-	if (!slash)
-		error = prepend(buffer, buflen, "/", 1);
-	if (!error)
-		error = is_mounted(vfsmnt) ? 1 : 2;
+	done_seqretry(&rename_lock, seq);
+
+	if (error >= 0 && bptr == *buffer) {
+		if (--blen < 0)
+			error = -ENAMETOOLONG;
+		else
+			*--bptr = '/';
+	}
+	*buffer = bptr;
+	*buflen = blen;
 	return error;
 }
 
@@ -2744,9 +2817,7 @@
 
 	prepend(&res, &buflen, "\0", 1);
 	br_read_lock(&vfsmount_lock);
-	write_seqlock(&rename_lock);
 	error = prepend_path(path, root, &res, &buflen);
-	write_sequnlock(&rename_lock);
 	br_read_unlock(&vfsmount_lock);
 
 	if (error < 0)
@@ -2765,9 +2836,7 @@
 
 	prepend(&res, &buflen, "\0", 1);
 	br_read_lock(&vfsmount_lock);
-	write_seqlock(&rename_lock);
 	error = prepend_path(path, &root, &res, &buflen);
-	write_sequnlock(&rename_lock);
 	br_read_unlock(&vfsmount_lock);
 
 	if (error > 1)
@@ -2833,9 +2902,7 @@
 
 	get_fs_root(current->fs, &root);
 	br_read_lock(&vfsmount_lock);
-	write_seqlock(&rename_lock);
 	error = path_with_deleted(path, &root, &res, &buflen);
-	write_sequnlock(&rename_lock);
 	br_read_unlock(&vfsmount_lock);
 	if (error < 0)
 		res = ERR_PTR(error);
@@ -2870,10 +2937,10 @@
 	char *end = buffer + buflen;
 	/* these dentries are never renamed, so d_lock is not needed */
 	if (prepend(&end, &buflen, " (deleted)", 11) ||
-	    prepend_name(&end, &buflen, &dentry->d_name) ||
+	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
 	    prepend(&end, &buflen, "/", 1))  
 		end = ERR_PTR(-ENAMETOOLONG);
-	return end;  
+	return end;
 }
 
 /*
@@ -2881,30 +2948,42 @@
  */
 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
 {
-	char *end = buf + buflen;
-	char *retval;
+	char *end, *retval;
+	int len, seq = 0;
+	int error = 0;
 
-	prepend(&end, &buflen, "\0", 1);
+	rcu_read_lock();
+restart:
+	end = buf + buflen;
+	len = buflen;
+	prepend(&end, &len, "\0", 1);
 	if (buflen < 1)
 		goto Elong;
 	/* Get '/' right */
 	retval = end-1;
 	*retval = '/';
-
+	read_seqbegin_or_lock(&rename_lock, &seq);
 	while (!IS_ROOT(dentry)) {
 		struct dentry *parent = dentry->d_parent;
-		int error;
 
 		prefetch(parent);
-		spin_lock(&dentry->d_lock);
-		error = prepend_name(&end, &buflen, &dentry->d_name);
-		spin_unlock(&dentry->d_lock);
-		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
-			goto Elong;
+		error = prepend_name(&end, &len, &dentry->d_name);
+		if (error)
+			break;
 
 		retval = end;
 		dentry = parent;
 	}
+	if (!(seq & 1))
+		rcu_read_unlock();
+	if (need_seqretry(&rename_lock, seq)) {
+		seq = 1;
+		goto restart;
+	}
+	done_seqretry(&rename_lock, seq);
+	if (error)
+		goto Elong;
 	return retval;
 Elong:
 	return ERR_PTR(-ENAMETOOLONG);
@@ -2912,13 +2991,7 @@
 
 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
 {
-	char *retval;
-
-	write_seqlock(&rename_lock);
-	retval = __dentry_path(dentry, buf, buflen);
-	write_sequnlock(&rename_lock);
-
-	return retval;
+	return __dentry_path(dentry, buf, buflen);
 }
 EXPORT_SYMBOL(dentry_path_raw);
 
@@ -2927,7 +3000,6 @@
 	char *p = NULL;
 	char *retval;
 
-	write_seqlock(&rename_lock);
 	if (d_unlinked(dentry)) {
 		p = buf + buflen;
 		if (prepend(&p, &buflen, "//deleted", 10) != 0)
@@ -2935,7 +3007,6 @@
 		buflen++;
 	}
 	retval = __dentry_path(dentry, buf, buflen);
-	write_sequnlock(&rename_lock);
 	if (!IS_ERR(retval) && p)
 		*p = '/';	/* restore '/' overridden with '\0' */
 	return retval;
@@ -2974,7 +3045,6 @@
 
 	error = -ENOENT;
 	br_read_lock(&vfsmount_lock);
-	write_seqlock(&rename_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
 		char *cwd = page + PAGE_SIZE;
@@ -2982,7 +3052,6 @@
 
 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &root, &cwd, &buflen);
-		write_sequnlock(&rename_lock);
 		br_read_unlock(&vfsmount_lock);
 
 		if (error < 0)
@@ -3003,7 +3072,6 @@
 				error = -EFAULT;
 		}
 	} else {
-		write_sequnlock(&rename_lock);
 		br_read_unlock(&vfsmount_lock);
 	}
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index d107576..c88e355 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -609,39 +609,35 @@
 	char *full_alg_name;
 	int rc = -EINVAL;
 
-	if (!crypt_stat->cipher) {
-		ecryptfs_printk(KERN_ERR, "No cipher specified\n");
-		goto out;
-	}
 	ecryptfs_printk(KERN_DEBUG,
 			"Initializing cipher [%s]; strlen = [%d]; "
 			"key_size_bits = [%zd]\n",
 			crypt_stat->cipher, (int)strlen(crypt_stat->cipher),
 			crypt_stat->key_size << 3);
+	mutex_lock(&crypt_stat->cs_tfm_mutex);
 	if (crypt_stat->tfm) {
 		rc = 0;
-		goto out;
+		goto out_unlock;
 	}
-	mutex_lock(&crypt_stat->cs_tfm_mutex);
 	rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name,
 						    crypt_stat->cipher, "cbc");
 	if (rc)
 		goto out_unlock;
 	crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
-	kfree(full_alg_name);
 	if (IS_ERR(crypt_stat->tfm)) {
 		rc = PTR_ERR(crypt_stat->tfm);
 		crypt_stat->tfm = NULL;
 		ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): "
 				"Error initializing cipher [%s]\n",
-				crypt_stat->cipher);
-		goto out_unlock;
+				full_alg_name);
+		goto out_free;
 	}
 	crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
 	rc = 0;
+out_free:
+	kfree(full_alg_name);
 out_unlock:
 	mutex_unlock(&crypt_stat->cs_tfm_mutex);
-out:
 	return rc;
 }
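
Two fixes share this hunk: the "transform already allocated" check now
happens with cs_tfm_mutex held, closing the window in which two racing
callers could both allocate a tfm, and full_alg_name now stays alive
long enough to appear in the error message, being freed on every exit
path past its allocation. The shape, reduced to a userspace sketch
(strdup()/malloc() standing in for the name helper and the crypto
allocation):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t tfm_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *tfm;                   /* stands in for crypt_stat->tfm */

static int init_tfm(const char *cipher)
{
    char *full_name;
    int rc = 0;

    pthread_mutex_lock(&tfm_mutex);
    if (tfm)                        /* checked under the lock, not before */
        goto out_unlock;

    full_name = strdup(cipher);     /* stands in for algify_cipher_name() */
    if (!full_name) {
        rc = -1;
        goto out_unlock;
    }
    tfm = malloc(64);               /* stands in for crypto_alloc_*() */
    if (!tfm)
        rc = -1;                    /* full_name still valid for messages */
    free(full_name);                /* one free point, success or failure */
out_unlock:
    pthread_mutex_unlock(&tfm_mutex);
    return rc;
}

int main(void)
{
    return init_tfm("aes") ? 1 : 0;
}
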
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 293f867..473e09d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -740,6 +740,7 @@
 		epi = rb_entry(rbp, struct epitem, rbn);
 
 		ep_unregister_pollwait(ep, epi);
+		cond_resched();
 	}
 
 	/*
@@ -754,6 +755,7 @@
 	while ((rbp = rb_first(&ep->rbr)) != NULL) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		ep_remove(ep, epi);
+		cond_resched();
 	}
 	mutex_unlock(&ep->mtx);
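
Both loops above can traverse a huge rbtree of epitems, and before this
change a single ep_free() could hold the CPU long enough to trigger
soft-lockup warnings; a cond_resched() per iteration inserts the missing
scheduling points. A rough userspace analogue, with sched_yield(3)
playing the role cond_resched() plays in the kernel:

#include <sched.h>

/* Long teardown loop with periodic scheduling points so one thread
 * cannot monopolize a CPU while churning through many items. */
static void teardown_many(int nitems)
{
    for (int i = 0; i < nitems; i++) {
        /* ... unregister/free one item ... */
        if ((i & 1023) == 0)
            sched_yield();      /* analogue of cond_resched() */
    }
}

int main(void)
{
    teardown_many(1 << 20);
    return 0;
}
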
 
diff --git a/fs/exec.c b/fs/exec.c
index fd774c7..8875dd1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -74,6 +74,8 @@
 void __register_binfmt(struct linux_binfmt * fmt, int insert)
 {
 	BUG_ON(!fmt);
+	if (WARN_ON(!fmt->load_binary))
+		return;
 	write_lock(&binfmt_lock);
 	insert ? list_add(&fmt->lh, &formats) :
 		 list_add_tail(&fmt->lh, &formats);
@@ -266,7 +268,7 @@
 	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 	vma->vm_end = STACK_TOP_MAX;
 	vma->vm_start = vma->vm_end - PAGE_SIZE;
-	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
@@ -1365,18 +1367,18 @@
 }
 EXPORT_SYMBOL(remove_arg_zero);
 
+#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
 /*
  * cycle the list of binary formats handler, until one recognizes the image
  */
 int search_binary_handler(struct linux_binprm *bprm)
 {
-	unsigned int depth = bprm->recursion_depth;
-	int try,retval;
+	bool need_retry = IS_ENABLED(CONFIG_MODULES);
 	struct linux_binfmt *fmt;
-	pid_t old_pid, old_vpid;
+	int retval;
 
 	/* This allows 4 levels of binfmt rewrites before failing hard. */
-	if (depth > 5)
+	if (bprm->recursion_depth > 5)
 		return -ELOOP;
 
 	retval = security_bprm_check(bprm);
@@ -1387,71 +1389,67 @@
 	if (retval)
 		return retval;
 
+	retval = -ENOENT;
+ retry:
+	read_lock(&binfmt_lock);
+	list_for_each_entry(fmt, &formats, lh) {
+		if (!try_module_get(fmt->module))
+			continue;
+		read_unlock(&binfmt_lock);
+		bprm->recursion_depth++;
+		retval = fmt->load_binary(bprm);
+		bprm->recursion_depth--;
+		if (retval >= 0 || retval != -ENOEXEC ||
+		    bprm->mm == NULL || bprm->file == NULL) {
+			put_binfmt(fmt);
+			return retval;
+		}
+		read_lock(&binfmt_lock);
+		put_binfmt(fmt);
+	}
+	read_unlock(&binfmt_lock);
+
+	if (need_retry && retval == -ENOEXEC) {
+		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
+		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
+			return retval;
+		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
+			return retval;
+		need_retry = false;
+		goto retry;
+	}
+
+	return retval;
+}
+EXPORT_SYMBOL(search_binary_handler);
+
+static int exec_binprm(struct linux_binprm *bprm)
+{
+	pid_t old_pid, old_vpid;
+	int ret;
+
 	/* Need to fetch pid before load_binary changes it */
 	old_pid = current->pid;
 	rcu_read_lock();
 	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
 	rcu_read_unlock();
 
-	retval = -ENOENT;
-	for (try=0; try<2; try++) {
-		read_lock(&binfmt_lock);
-		list_for_each_entry(fmt, &formats, lh) {
-			int (*fn)(struct linux_binprm *) = fmt->load_binary;
-			if (!fn)
-				continue;
-			if (!try_module_get(fmt->module))
-				continue;
-			read_unlock(&binfmt_lock);
-			bprm->recursion_depth = depth + 1;
-			retval = fn(bprm);
-			bprm->recursion_depth = depth;
-			if (retval >= 0) {
-				if (depth == 0) {
-					trace_sched_process_exec(current, old_pid, bprm);
-					ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
-				}
-				put_binfmt(fmt);
-				allow_write_access(bprm->file);
-				if (bprm->file)
-					fput(bprm->file);
-				bprm->file = NULL;
-				current->did_exec = 1;
-				proc_exec_connector(current);
-				return retval;
-			}
-			read_lock(&binfmt_lock);
-			put_binfmt(fmt);
-			if (retval != -ENOEXEC || bprm->mm == NULL)
-				break;
-			if (!bprm->file) {
-				read_unlock(&binfmt_lock);
-				return retval;
-			}
-		}
-		read_unlock(&binfmt_lock);
-#ifdef CONFIG_MODULES
-		if (retval != -ENOEXEC || bprm->mm == NULL) {
-			break;
-		} else {
-#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
-			if (printable(bprm->buf[0]) &&
-			    printable(bprm->buf[1]) &&
-			    printable(bprm->buf[2]) &&
-			    printable(bprm->buf[3]))
-				break; /* -ENOEXEC */
-			if (try)
-				break; /* -ENOEXEC */
-			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
-		}
-#else
-		break;
-#endif
-	}
-	return retval;
-}
+	ret = search_binary_handler(bprm);
+	if (ret >= 0) {
+		trace_sched_process_exec(current, old_pid, bprm);
+		ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
+		current->did_exec = 1;
+		proc_exec_connector(current);
 
-EXPORT_SYMBOL(search_binary_handler);
+		if (bprm->file) {
+			allow_write_access(bprm->file);
+			fput(bprm->file);
+			bprm->file = NULL; /* to catch use-after-free */
+		}
+	}
+
+	return ret;
+}
 
 /*
  * sys_execve() executes a new program.
@@ -1541,7 +1539,7 @@
 	if (retval < 0)
 		goto out;
 
-	retval = search_binary_handler(bprm);
+	retval = exec_binprm(bprm);
 	if (retval < 0)
 		goto out;
 
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 293bc2e..a235f00 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -231,7 +231,7 @@
 	int result = 0;
 
 	buf->sequence++;
-	if (buf->ino == ino) {
+	if (buf->ino == ino && len <= NAME_MAX) {
 		memcpy(buf->name, name, len);
 		buf->name[len] = '\0';
 		buf->found = 1;
diff --git a/fs/file_table.c b/fs/file_table.c
index 322cd37..abdd15a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -311,8 +311,7 @@
 				return;
 			/*
 			 * After this task has run exit_task_work(),
-			 * task_work_add() will fail.  free_ipc_ns()->
-			 * shm_destroy() can do this.  Fall through to delayed
+			 * task_work_add() will fail.  Fall through to delayed
 			 * fput to avoid leaking *file.
 			 */
 		}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 68851ff..30f6f27 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -723,7 +723,7 @@
 	return wrote;
 }
 
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
 				enum wb_reason reason)
 {
 	struct wb_writeback_work work = {
@@ -1049,10 +1049,8 @@
 {
 	struct backing_dev_info *bdi;
 
-	if (!nr_pages) {
-		nr_pages = global_page_state(NR_FILE_DIRTY) +
-				global_page_state(NR_UNSTABLE_NFS);
-	}
+	if (!nr_pages)
+		nr_pages = get_nr_dirty_pages();
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
@@ -1173,6 +1171,8 @@
 			bool wakeup_bdi = false;
 			bdi = inode_to_bdi(inode);
 
+			spin_unlock(&inode->i_lock);
+			spin_lock(&bdi->wb.list_lock);
 			if (bdi_cap_writeback_dirty(bdi)) {
 				WARN(!test_bit(BDI_registered, &bdi->state),
 				     "bdi-%s not registered\n", bdi->name);
@@ -1187,8 +1187,6 @@
 					wakeup_bdi = true;
 			}
 
-			spin_unlock(&inode->i_lock);
-			spin_lock(&bdi->wb.list_lock);
 			inode->dirtied_when = jiffies;
 			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
 			spin_unlock(&bdi->wb.list_lock);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 8702b73..73899c1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -913,7 +913,7 @@
 		(1 << FSCACHE_OP_WAITING) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
 
-	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
 	if (ret < 0)
 		goto nomem_free;
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e0fe703..8443459 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -930,7 +930,7 @@
 	fc->bdi.name = "fuse";
 	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	/* fuse does its own writeback accounting */
-	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
+	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
 
 	err = bdi_init(&fc->bdi);
 	if (err)
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
index a633718..24bc20f 100644
--- a/fs/hfsplus/Kconfig
+++ b/fs/hfsplus/Kconfig
@@ -11,3 +11,21 @@
 	  MacOS 8. It includes all Mac specific filesystem data such as
 	  data forks and creator codes, but it also has several UNIX
 	  style features such as file ownership and permissions.
+
+config HFSPLUS_FS_POSIX_ACL
+	bool "HFS+ POSIX Access Control Lists"
+	depends on HFSPLUS_FS
+	select FS_POSIX_ACL
+	help
+	  POSIX Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the POSIX ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  Note that POSIX ACLs are only interpreted by Linux; they have
+	  no meaning under Mac OS X. Mac OS X, beginning with version
+	  10.4 ("Tiger"), supports NFSv4 ACLs, which are part of the
+	  NFSv4 standard.
+
+	  If you don't know what Access Control Lists are, say N.
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index 09d278b..683fca2 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -7,3 +7,5 @@
 hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
 		bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
 		attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
+
+hfsplus-$(CONFIG_HFSPLUS_FS_POSIX_ACL)	+= posix_acl.o
diff --git a/fs/hfsplus/acl.h b/fs/hfsplus/acl.h
new file mode 100644
index 0000000..07c0d49
--- /dev/null
+++ b/fs/hfsplus/acl.h
@@ -0,0 +1,30 @@
+/*
+ * linux/fs/hfsplus/acl.h
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for POSIX Access Control Lists (ACLs) support.
+ */
+
+#include <linux/posix_acl_xattr.h>
+
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+
+/* posix_acl.c */
+struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type);
+extern int hfsplus_posix_acl_chmod(struct inode *);
+extern int hfsplus_init_posix_acl(struct inode *, struct inode *);
+
+#else  /* CONFIG_HFSPLUS_FS_POSIX_ACL */
+#define hfsplus_get_posix_acl NULL
+
+static inline int hfsplus_posix_acl_chmod(struct inode *inode)
+{
+	return 0;
+}
+
+static inline int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
+{
+	return 0;
+}
+#endif  /* CONFIG_HFSPLUS_FS_POSIX_ACL */
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index d8ce4bd..4a4fea0 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -16,6 +16,7 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 #include "xattr.h"
+#include "acl.h"
 
 static inline void hfsplus_instantiate(struct dentry *dentry,
 				       struct inode *inode, u32 cnid)
@@ -529,6 +530,9 @@
 	.getxattr		= generic_getxattr,
 	.listxattr		= hfsplus_listxattr,
 	.removexattr		= hfsplus_removexattr,
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+	.get_acl		= hfsplus_get_posix_acl,
+#endif
 };
 
 const struct file_operations hfsplus_dir_operations = {
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index ede7931..2b9cd01 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -30,6 +30,7 @@
 #define DBG_EXTENT	0x00000020
 #define DBG_BITMAP	0x00000040
 #define DBG_ATTR_MOD	0x00000080
+#define DBG_ACL_MOD	0x00000100
 
 #if 0
 #define DBG_MASK	(DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f833d35..4d2edae 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -19,6 +19,7 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 #include "xattr.h"
+#include "acl.h"
 
 static int hfsplus_readpage(struct file *file, struct page *page)
 {
@@ -316,6 +317,13 @@
 
 	setattr_copy(inode, attr);
 	mark_inode_dirty(inode);
+
+	if (attr->ia_valid & ATTR_MODE) {
+		error = hfsplus_posix_acl_chmod(inode);
+		if (unlikely(error))
+			return error;
+	}
+
 	return 0;
 }
 
@@ -383,6 +391,9 @@
 	.getxattr	= generic_getxattr,
 	.listxattr	= hfsplus_listxattr,
 	.removexattr	= hfsplus_removexattr,
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+	.get_acl	= hfsplus_get_posix_acl,
+#endif
 };
 
 static const struct file_operations hfsplus_file_operations = {
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
new file mode 100644
index 0000000..b609cc1
--- /dev/null
+++ b/fs/hfsplus/posix_acl.c
@@ -0,0 +1,274 @@
+/*
+ * linux/fs/hfsplus/posix_acl.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for POSIX Access Control Lists (ACLs) support.
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+#include "acl.h"
+
+struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type)
+{
+	struct posix_acl *acl;
+	char *xattr_name;
+	char *value = NULL;
+	ssize_t size;
+
+	acl = get_cached_acl(inode, type);
+	if (acl != ACL_NOT_CACHED)
+		return acl;
+
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		xattr_name = POSIX_ACL_XATTR_ACCESS;
+		break;
+	case ACL_TYPE_DEFAULT:
+		xattr_name = POSIX_ACL_XATTR_DEFAULT;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	size = __hfsplus_getxattr(inode, xattr_name, NULL, 0);
+
+	if (size > 0) {
+		value = (char *)hfsplus_alloc_attr_entry();
+		if (unlikely(!value))
+			return ERR_PTR(-ENOMEM);
+		size = __hfsplus_getxattr(inode, xattr_name, value, size);
+	}
+
+	if (size > 0)
+		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+	else if (size == -ENODATA)
+		acl = NULL;
+	else
+		acl = ERR_PTR(size);
+
+	hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
+
+	if (!IS_ERR(acl))
+		set_cached_acl(inode, type, acl);
+
+	return acl;
+}
+
+static int hfsplus_set_posix_acl(struct inode *inode,
+					int type,
+					struct posix_acl *acl)
+{
+	int err;
+	char *xattr_name;
+	size_t size = 0;
+	char *value = NULL;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		xattr_name = POSIX_ACL_XATTR_ACCESS;
+		if (acl) {
+			err = posix_acl_equiv_mode(acl, &inode->i_mode);
+			if (err < 0)
+				return err;
+		}
+		err = 0;
+		break;
+
+	case ACL_TYPE_DEFAULT:
+		xattr_name = POSIX_ACL_XATTR_DEFAULT;
+		if (!S_ISDIR(inode->i_mode))
+			return acl ? -EACCES : 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (acl) {
+		size = posix_acl_xattr_size(acl->a_count);
+		if (unlikely(size > HFSPLUS_MAX_INLINE_DATA_SIZE))
+			return -ENOMEM;
+		value = (char *)hfsplus_alloc_attr_entry();
+		if (unlikely(!value))
+			return -ENOMEM;
+		err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+		if (unlikely(err < 0))
+			goto end_set_acl;
+	}
+
+	err = __hfsplus_setxattr(inode, xattr_name, value, size, 0);
+
+end_set_acl:
+	hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
+
+	if (!err)
+		set_cached_acl(inode, type, acl);
+
+	return err;
+}
+
+int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
+{
+	int err = 0;
+	struct posix_acl *acl = NULL;
+
+	hfs_dbg(ACL_MOD,
+		"[%s]: ino %lu, dir->ino %lu\n",
+		__func__, inode->i_ino, dir->i_ino);
+
+	if (S_ISLNK(inode->i_mode))
+		return 0;
+
+	acl = hfsplus_get_posix_acl(dir, ACL_TYPE_DEFAULT);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+
+	if (acl) {
+		if (S_ISDIR(inode->i_mode)) {
+			err = hfsplus_set_posix_acl(inode,
+							ACL_TYPE_DEFAULT,
+							acl);
+			if (unlikely(err))
+				goto init_acl_cleanup;
+		}
+
+		err = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
+		if (unlikely(err < 0))
+			return err;
+
+		if (err > 0)
+			err = hfsplus_set_posix_acl(inode,
+							ACL_TYPE_ACCESS,
+							acl);
+	} else
+		inode->i_mode &= ~current_umask();
+
+init_acl_cleanup:
+	posix_acl_release(acl);
+	return err;
+}
+
+int hfsplus_posix_acl_chmod(struct inode *inode)
+{
+	int err;
+	struct posix_acl *acl;
+
+	hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino);
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	acl = hfsplus_get_posix_acl(inode, ACL_TYPE_ACCESS);
+	if (IS_ERR(acl) || !acl)
+		return PTR_ERR(acl);
+
+	err = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+	if (unlikely(err))
+		return err;
+
+	err = hfsplus_set_posix_acl(inode, ACL_TYPE_ACCESS, acl);
+	posix_acl_release(acl);
+	return err;
+}
+
+static int hfsplus_xattr_get_posix_acl(struct dentry *dentry,
+					const char *name,
+					void *buffer,
+					size_t size,
+					int type)
+{
+	int err = 0;
+	struct posix_acl *acl;
+
+	hfs_dbg(ACL_MOD,
+		"[%s]: ino %lu, buffer %p, size %zu, type %#x\n",
+		__func__, dentry->d_inode->i_ino, buffer, size, type);
+
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+
+	acl = hfsplus_get_posix_acl(dentry->d_inode, type);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl == NULL)
+		return -ENODATA;
+
+	err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+	posix_acl_release(acl);
+
+	return err;
+}
+
+static int hfsplus_xattr_set_posix_acl(struct dentry *dentry,
+					const char *name,
+					const void *value,
+					size_t size,
+					int flags,
+					int type)
+{
+	int err = 0;
+	struct inode *inode = dentry->d_inode;
+	struct posix_acl *acl = NULL;
+
+	hfs_dbg(ACL_MOD,
+		"[%s]: ino %lu, value %p, size %zu, flags %#x, type %#x\n",
+		__func__, inode->i_ino, value, size, flags, type);
+
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+
+	if (!inode_owner_or_capable(inode))
+		return -EPERM;
+
+	if (value) {
+		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		else if (acl) {
+			err = posix_acl_valid(acl);
+			if (err)
+				goto end_xattr_set_acl;
+		}
+	}
+
+	err = hfsplus_set_posix_acl(inode, type, acl);
+
+end_xattr_set_acl:
+	posix_acl_release(acl);
+	return err;
+}
+
+static size_t hfsplus_xattr_list_posix_acl(struct dentry *dentry,
+						char *list,
+						size_t list_size,
+						const char *name,
+						size_t name_len,
+						int type)
+{
+	/*
+	 * This method is not used;
+	 * hfsplus_listxattr() is used instead of generic_listxattr().
+	 */
+	return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_acl_access_handler = {
+	.prefix	= POSIX_ACL_XATTR_ACCESS,
+	.flags	= ACL_TYPE_ACCESS,
+	.list	= hfsplus_xattr_list_posix_acl,
+	.get	= hfsplus_xattr_get_posix_acl,
+	.set	= hfsplus_xattr_set_posix_acl,
+};
+
+const struct xattr_handler hfsplus_xattr_acl_default_handler = {
+	.prefix	= POSIX_ACL_XATTR_DEFAULT,
+	.flags	= ACL_TYPE_DEFAULT,
+	.list	= hfsplus_xattr_list_posix_acl,
+	.get	= hfsplus_xattr_get_posix_acl,
+	.set	= hfsplus_xattr_set_posix_acl,
+};
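
hfsplus_get_posix_acl() above uses the classic two-call xattr protocol:
probe with a NULL buffer to learn the value's size, then fetch for real.
The same protocol at the syscall level, as a minimal userspace sketch
(the file path is hypothetical; note that the size can legitimately
change between the calls, which the second call then reports as an
error):

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
    const char *path = "/tmp/somefile";     /* hypothetical test file */
    ssize_t size;
    char *value;

    size = getxattr(path, "system.posix_acl_access", NULL, 0);
    if (size <= 0)
        return 1;                           /* absent, or an error */

    value = malloc(size);
    if (!value)
        return 1;
    size = getxattr(path, "system.posix_acl_access", value, size);
    if (size > 0)
        printf("fetched %zd bytes of ACL xattr\n", size);
    free(value);
    return size > 0 ? 0 : 1;
}
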
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index f663461..bd8471f 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -8,11 +8,16 @@
 
 #include "hfsplus_fs.h"
 #include "xattr.h"
+#include "acl.h"
 
 const struct xattr_handler *hfsplus_xattr_handlers[] = {
 	&hfsplus_xattr_osx_handler,
 	&hfsplus_xattr_user_handler,
 	&hfsplus_xattr_trusted_handler,
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+	&hfsplus_xattr_acl_access_handler,
+	&hfsplus_xattr_acl_default_handler,
+#endif
 	&hfsplus_xattr_security_handler,
 	NULL
 };
@@ -46,11 +51,58 @@
 	return true;
 }
 
+static int can_set_system_xattr(struct inode *inode, const char *name,
+				const void *value, size_t size)
+{
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+	struct posix_acl *acl;
+	int err;
+
+	if (!inode_owner_or_capable(inode))
+		return -EPERM;
+
+	/*
+	 * POSIX_ACL_XATTR_ACCESS is tied to i_mode
+	 */
+	if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) {
+		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		if (acl) {
+			err = posix_acl_equiv_mode(acl, &inode->i_mode);
+			posix_acl_release(acl);
+			if (err < 0)
+				return err;
+			mark_inode_dirty(inode);
+		}
+		/*
+		 * We're changing the ACL.  Get rid of the cached one
+		 */
+		forget_cached_acl(inode, ACL_TYPE_ACCESS);
+
+		return 0;
+	} else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
+		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		posix_acl_release(acl);
+
+		/*
+		 * We're changing the default ACL.  Get rid of the cached one
+		 */
+		forget_cached_acl(inode, ACL_TYPE_DEFAULT);
+
+		return 0;
+	}
+#endif /* CONFIG_HFSPLUS_FS_POSIX_ACL */
+	return -EOPNOTSUPP;
+}
+
 static int can_set_xattr(struct inode *inode, const char *name,
 				const void *value, size_t value_len)
 {
 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return -EOPNOTSUPP; /* TODO: implement ACL support */
+		return can_set_system_xattr(inode, name, value, value_len);
 
 	if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) {
 		/*
@@ -253,11 +305,10 @@
 	return len;
 }
 
-static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry,
+static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
 						void *value, size_t size)
 {
 	ssize_t res = 0;
-	struct inode *inode = dentry->d_inode;
 	struct hfs_find_data fd;
 	u16 entry_type;
 	u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
@@ -304,10 +355,9 @@
 	return res;
 }
 
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
 			 void *value, size_t size)
 {
-	struct inode *inode = dentry->d_inode;
 	struct hfs_find_data fd;
 	hfsplus_attr_entry *entry;
 	__be32 xattr_record_type;
@@ -333,7 +383,7 @@
 	}
 
 	if (!strcmp_xattr_finder_info(name))
-		return hfsplus_getxattr_finder_info(dentry, value, size);
+		return hfsplus_getxattr_finder_info(inode, value, size);
 
 	if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
 		return -EOPNOTSUPP;
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index 847b695..841b569 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -14,8 +14,8 @@
 extern const struct xattr_handler hfsplus_xattr_osx_handler;
 extern const struct xattr_handler hfsplus_xattr_user_handler;
 extern const struct xattr_handler hfsplus_xattr_trusted_handler;
-/*extern const struct xattr_handler hfsplus_xattr_acl_access_handler;*/
-/*extern const struct xattr_handler hfsplus_xattr_acl_default_handler;*/
+extern const struct xattr_handler hfsplus_xattr_acl_access_handler;
+extern const struct xattr_handler hfsplus_xattr_acl_default_handler;
 extern const struct xattr_handler hfsplus_xattr_security_handler;
 
 extern const struct xattr_handler *hfsplus_xattr_handlers[];
@@ -29,9 +29,17 @@
 	return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags);
 }
 
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
 			void *value, size_t size);
 
+static inline ssize_t hfsplus_getxattr(struct dentry *dentry,
+					const char *name,
+					void *value,
+					size_t size)
+{
+	return __hfsplus_getxattr(dentry->d_inode, name, value, size);
+}
+
 ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
 
 int hfsplus_removexattr(struct dentry *dentry, const char *name);
@@ -39,22 +47,7 @@
 int hfsplus_init_security(struct inode *inode, struct inode *dir,
 				const struct qstr *qstr);
 
-static inline int hfsplus_init_acl(struct inode *inode, struct inode *dir)
-{
-	/*TODO: implement*/
-	return 0;
-}
-
-static inline int hfsplus_init_inode_security(struct inode *inode,
-						struct inode *dir,
-						const struct qstr *qstr)
-{
-	int err;
-
-	err = hfsplus_init_acl(inode, dir);
-	if (!err)
-		err = hfsplus_init_security(inode, dir, qstr);
-	return err;
-}
+int hfsplus_init_inode_security(struct inode *inode, struct inode *dir,
+				const struct qstr *qstr);
 
 #endif
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index 83b842f..0072276 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -9,6 +9,7 @@
 #include <linux/security.h>
 #include "hfsplus_fs.h"
 #include "xattr.h"
+#include "acl.h"
 
 static int hfsplus_security_getxattr(struct dentry *dentry, const char *name,
 					void *buffer, size_t size, int type)
@@ -96,6 +97,18 @@
 					&hfsplus_initxattrs, NULL);
 }
 
+int hfsplus_init_inode_security(struct inode *inode,
+						struct inode *dir,
+						const struct qstr *qstr)
+{
+	int err;
+
+	err = hfsplus_init_posix_acl(inode, dir);
+	if (!err)
+		err = hfsplus_init_security(inode, dir, qstr);
+	return err;
+}
+
 const struct xattr_handler hfsplus_xattr_security_handler = {
 	.prefix	= XATTR_SECURITY_PREFIX,
 	.list	= hfsplus_security_listxattr,
diff --git a/fs/internal.h b/fs/internal.h
index d208937..2be46ea 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -45,6 +45,9 @@
  * namei.c
  */
 extern int __inode_permission(struct inode *, int);
+extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+			   const char *, unsigned int, struct path *);
 
 /*
  * namespace.c
diff --git a/fs/namei.c b/fs/namei.c
index 56e4f4d..409a441 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -574,9 +574,12 @@
 drop_dentry:
 	unlock_rcu_walk();
 	dput(dentry);
-	return -ECHILD;
+	goto drop_root_mnt;
 out:
 	unlock_rcu_walk();
+drop_root_mnt:
+	if (!(nd->flags & LOOKUP_ROOT))
+		nd->root.mnt = NULL;
 	return -ECHILD;
 }
 
@@ -2206,7 +2209,7 @@
 }
 
 /**
- * umount_lookup_last - look up last component for umount
+ * mountpoint_last - look up last component for umount
  * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
  * @path: pointer to container for result
  *
@@ -2233,25 +2236,28 @@
  *         to the link, and nd->path will *not* be put.
  */
 static int
-umount_lookup_last(struct nameidata *nd, struct path *path)
+mountpoint_last(struct nameidata *nd, struct path *path)
 {
 	int error = 0;
 	struct dentry *dentry;
 	struct dentry *dir = nd->path.dentry;
 
-	if (unlikely(nd->flags & LOOKUP_RCU)) {
-		WARN_ON_ONCE(1);
-		error = -ECHILD;
-		goto error_check;
+	/* If we're in rcuwalk, drop out of it to handle last component */
+	if (nd->flags & LOOKUP_RCU) {
+		if (unlazy_walk(nd, NULL)) {
+			error = -ECHILD;
+			goto out;
+		}
 	}
 
 	nd->flags &= ~LOOKUP_PARENT;
 
 	if (unlikely(nd->last_type != LAST_NORM)) {
 		error = handle_dots(nd, nd->last_type);
-		if (!error)
-			dentry = dget(nd->path.dentry);
-		goto error_check;
+		if (error)
+			goto out;
+		dentry = dget(nd->path.dentry);
+		goto done;
 	}
 
 	mutex_lock(&dir->d_inode->i_mutex);
@@ -2265,44 +2271,43 @@
 		dentry = d_alloc(dir, &nd->last);
 		if (!dentry) {
 			error = -ENOMEM;
-		} else {
-			dentry = lookup_real(dir->d_inode, dentry, nd->flags);
-			if (IS_ERR(dentry))
-				error = PTR_ERR(dentry);
+			goto out;
 		}
+		dentry = lookup_real(dir->d_inode, dentry, nd->flags);
+		error = PTR_ERR(dentry);
+		if (IS_ERR(dentry))
+			goto out;
 	}
 	mutex_unlock(&dir->d_inode->i_mutex);
 
-error_check:
-	if (!error) {
-		if (!dentry->d_inode) {
-			error = -ENOENT;
-			dput(dentry);
-		} else {
-			path->dentry = dentry;
-			path->mnt = mntget(nd->path.mnt);
-			if (should_follow_link(dentry->d_inode,
-						nd->flags & LOOKUP_FOLLOW))
-				return 1;
-			follow_mount(path);
-		}
+done:
+	if (!dentry->d_inode) {
+		error = -ENOENT;
+		dput(dentry);
+		goto out;
 	}
+	path->dentry = dentry;
+	path->mnt = mntget(nd->path.mnt);
+	if (should_follow_link(dentry->d_inode, nd->flags & LOOKUP_FOLLOW))
+		return 1;
+	follow_mount(path);
+	error = 0;
+out:
 	terminate_walk(nd);
 	return error;
 }
 
 /**
- * path_umountat - look up a path to be umounted
+ * path_mountpoint - look up a path to be umounted
  * @dfd:	directory file descriptor to start walk from
  * @name:	full pathname to walk
  * @flags:	lookup flags
- * @nd:		pathwalk nameidata
  *
  * Look up the given name, but don't attempt to revalidate the last component.
 * Returns 0 and "path" will be valid on success; returns an error otherwise.
  */
 static int
-path_umountat(int dfd, const char *name, struct path *path, unsigned int flags)
+path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
 {
 	struct file *base = NULL;
 	struct nameidata nd;
@@ -2317,16 +2322,7 @@
 	if (err)
 		goto out;
 
-	/* If we're in rcuwalk, drop out of it to handle last component */
-	if (nd.flags & LOOKUP_RCU) {
-		err = unlazy_walk(&nd, NULL);
-		if (err) {
-			terminate_walk(&nd);
-			goto out;
-		}
-	}
-
-	err = umount_lookup_last(&nd, path);
+	err = mountpoint_last(&nd, path);
 	while (err > 0) {
 		void *cookie;
 		struct path link = *path;
@@ -2337,7 +2333,7 @@
 		err = follow_link(&link, &nd, &cookie);
 		if (err)
 			break;
-		err = umount_lookup_last(&nd, path);
+		err = mountpoint_last(&nd, path);
 		put_link(&nd, &link, cookie);
 	}
 out:
@@ -2350,8 +2346,22 @@
 	return err;
 }
 
+static int
+filename_mountpoint(int dfd, struct filename *s, struct path *path,
+			unsigned int flags)
+{
+	int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
+	if (unlikely(error == -ECHILD))
+		error = path_mountpoint(dfd, s->name, path, flags);
+	if (unlikely(error == -ESTALE))
+		error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
+	if (likely(!error))
+		audit_inode(s, path->dentry, 0);
+	return error;
+}
+
 /**
- * user_path_umountat - lookup a path from userland in order to umount it
+ * user_path_mountpoint_at - lookup a path from userland in order to umount it
  * @dfd:	directory file descriptor
  * @name:	pathname from userland
  * @flags:	lookup flags
@@ -2365,28 +2375,27 @@
  * Returns 0 and populates "path" on success.
  */
 int
-user_path_umountat(int dfd, const char __user *name, unsigned int flags,
+user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
 			struct path *path)
 {
 	struct filename *s = getname(name);
 	int error;
-
 	if (IS_ERR(s))
 		return PTR_ERR(s);
-
-	error = path_umountat(dfd, s->name, path, flags | LOOKUP_RCU);
-	if (unlikely(error == -ECHILD))
-		error = path_umountat(dfd, s->name, path, flags);
-	if (unlikely(error == -ESTALE))
-		error = path_umountat(dfd, s->name, path, flags | LOOKUP_REVAL);
-
-	if (likely(!error))
-		audit_inode(s, path->dentry, 0);
-
+	error = filename_mountpoint(dfd, s, path, flags);
 	putname(s);
 	return error;
 }
 
+int
+kern_path_mountpoint(int dfd, const char *name, struct path *path,
+			unsigned int flags)
+{
+	struct filename s = {.name = name};
+	return filename_mountpoint(dfd, &s, path, flags);
+}
+EXPORT_SYMBOL(kern_path_mountpoint);
+
 /*
  * It's inline, so penalty for filesystems that don't use sticky bit is
  * minimal.
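
filename_mountpoint() is where the staged-lookup policy now lives: try
the RCU walk first, fall back to ref-walk only on -ECHILD ("cannot
proceed locklessly"), and force revalidation only on -ESTALE; any other
error is final. The shape in isolation, with a hypothetical do_lookup()
core wired to fail the fast mode:

#include <errno.h>
#include <stdio.h>

#define MODE_FAST   0x1     /* stands in for LOOKUP_RCU */
#define MODE_REVAL  0x2     /* stands in for LOOKUP_REVAL */

/* Hypothetical lookup core: here the fast mode always gives up, to
 * exercise the fallback. */
static int do_lookup(const char *name, unsigned flags)
{
    (void)name;
    if (flags & MODE_FAST)
        return -ECHILD;     /* "cannot complete locklessly" */
    return 0;
}

static int lookup_with_fallback(const char *name, unsigned flags)
{
    int err = do_lookup(name, flags | MODE_FAST);

    if (err == -ECHILD)     /* fast mode gave up; retry with refcounts */
        err = do_lookup(name, flags);
    if (err == -ESTALE)     /* stale handle; force revalidation */
        err = do_lookup(name, flags | MODE_REVAL);
    return err;
}

int main(void)
{
    printf("%d\n", lookup_with_fallback("/mnt", 0));    /* 0 after fallback */
    return 0;
}
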
diff --git a/fs/namespace.c b/fs/namespace.c
index fc2b522..da5c494 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -17,7 +17,7 @@
 #include <linux/security.h>
 #include <linux/idr.h>
 #include <linux/acct.h>		/* acct_auto_close_mnt */
-#include <linux/ramfs.h>	/* init_rootfs */
+#include <linux/init.h>		/* init_rootfs */
 #include <linux/fs_struct.h>	/* get_fs_root et.al. */
 #include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
 #include <linux/uaccess.h>
@@ -1321,7 +1321,7 @@
 	if (!(flags & UMOUNT_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
 
-	retval = user_path_umountat(AT_FDCWD, name, lookup_flags, &path);
+	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
 	if (retval)
 		goto out;
 	mnt = real_mount(path.mnt);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 105a3b0..e0a65a9 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -173,8 +173,6 @@
 	int status;
 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 
-	dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname);
-
 	if (test_and_set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
 		return;
 	if (!nn->rec_file)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 43f4229..0874998 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -368,11 +368,8 @@
 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
 {
 	struct nfs4_delegation *dp;
-	struct nfs4_file *fp = stp->st_file;
 
 	dprintk("NFSD alloc_init_deleg\n");
-	if (fp->fi_had_conflict)
-		return NULL;
 	if (num_delegations > max_delegations)
 		return NULL;
 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
@@ -389,8 +386,7 @@
 	INIT_LIST_HEAD(&dp->dl_perfile);
 	INIT_LIST_HEAD(&dp->dl_perclnt);
 	INIT_LIST_HEAD(&dp->dl_recall_lru);
-	get_nfs4_file(fp);
-	dp->dl_file = fp;
+	dp->dl_file = NULL;
 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
 	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
 	dp->dl_time = 0;
@@ -3035,7 +3031,7 @@
 	if (status) {
 		list_del_init(&dp->dl_perclnt);
 		locks_free_lock(fl);
-		return -ENOMEM;
+		return status;
 	}
 	fp->fi_lease = fl;
 	fp->fi_deleg_file = get_file(fl->fl_file);
@@ -3044,22 +3040,35 @@
 	return 0;
 }
 
-static int nfs4_set_delegation(struct nfs4_delegation *dp)
+static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
 {
-	struct nfs4_file *fp = dp->dl_file;
+	int status;
 
-	if (!fp->fi_lease)
-		return nfs4_setlease(dp);
+	if (fp->fi_had_conflict)
+		return -EAGAIN;
+	get_nfs4_file(fp);
+	dp->dl_file = fp;
+	if (!fp->fi_lease) {
+		status = nfs4_setlease(dp);
+		if (status)
+			goto out_free;
+		return 0;
+	}
 	spin_lock(&recall_lock);
 	if (fp->fi_had_conflict) {
 		spin_unlock(&recall_lock);
-		return -EAGAIN;
+		status = -EAGAIN;
+		goto out_free;
 	}
 	atomic_inc(&fp->fi_delegees);
 	list_add(&dp->dl_perfile, &fp->fi_delegations);
 	spin_unlock(&recall_lock);
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 	return 0;
+out_free:
+	put_nfs4_file(fp);
+	dp->dl_file = NULL;
+	return status;
 }
 
 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
@@ -3134,7 +3143,7 @@
 	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
 	if (dp == NULL)
 		goto out_no_deleg;
-	status = nfs4_set_delegation(dp);
+	status = nfs4_set_delegation(dp, stp->st_file);
 	if (status)
 		goto out_free;
 
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 8a40457..b4f788e 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -51,10 +51,6 @@
 		return ERR_PTR(-EINVAL);
 
 	count = size / sizeof(struct posix_acl_entry);
-	if (count < 0)
-		return ERR_PTR(-EINVAL);
-	if (count == 0)
-		return NULL;
 
 	acl = posix_acl_alloc(count, GFP_NOFS);
 	if (!acl)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 94417a8..f37d3c0 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2044,7 +2044,7 @@
 
 out_write_size:
 	pos += copied;
-	if (pos > inode->i_size) {
+	if (pos > i_size_read(inode)) {
 		i_size_write(inode, pos);
 		mark_inode_dirty(inode);
 	}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 5c1c864..363f0dc 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -628,11 +628,9 @@
 				struct o2nm_node *node,
 				int idx)
 {
-	struct list_head *iter;
 	struct o2hb_callback_func *f;
 
-	list_for_each(iter, &hbcall->list) {
-		f = list_entry(iter, struct o2hb_callback_func, hc_item);
+	list_for_each_entry(f, &hbcall->list, hc_item) {
 		mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
 		(f->hc_func)(node, idx, f->hc_data);
 	}
@@ -641,16 +639,9 @@
 /* Will run the list in order until we process the passed event */
 static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
 {
-	int empty;
 	struct o2hb_callback *hbcall;
 	struct o2hb_node_event *event;
 
-	spin_lock(&o2hb_live_lock);
-	empty = list_empty(&queued_event->hn_item);
-	spin_unlock(&o2hb_live_lock);
-	if (empty)
-		return;
-
 	/* Holding callback sem assures we don't alter the callback
 	 * lists when doing this, and serializes ourselves with other
 	 * processes wanting callbacks. */
@@ -709,6 +700,7 @@
 	struct o2hb_node_event event =
 		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
 	struct o2nm_node *node;
+	int queued = 0;
 
 	node = o2nm_get_node_by_num(slot->ds_node_num);
 	if (!node)
@@ -726,11 +718,13 @@
 
 			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
 					      slot->ds_node_num);
+			queued = 1;
 		}
 	}
 	spin_unlock(&o2hb_live_lock);
 
-	o2hb_run_event_list(&event);
+	if (queued)
+		o2hb_run_event_list(&event);
 
 	o2nm_node_put(node);
 }
@@ -790,6 +784,7 @@
 	unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
 	unsigned int slot_dead_ms;
 	int tmp;
+	int queued = 0;
 
 	memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
 
@@ -883,6 +878,7 @@
 					      slot->ds_node_num);
 
 			changed = 1;
+			queued = 1;
 		}
 
 		list_add_tail(&slot->ds_live_item,
@@ -934,6 +930,7 @@
 					      node, slot->ds_node_num);
 
 			changed = 1;
+			queued = 1;
 		}
 
 		/* We don't clear this because the node is still
@@ -949,7 +946,8 @@
 out:
 	spin_unlock(&o2hb_live_lock);
 
-	o2hb_run_event_list(&event);
+	if (queued)
+		o2hb_run_event_list(&event);
 
 	if (node)
 		o2nm_node_put(node);
@@ -2516,8 +2514,7 @@
 int o2hb_register_callback(const char *region_uuid,
 			   struct o2hb_callback_func *hc)
 {
-	struct o2hb_callback_func *tmp;
-	struct list_head *iter;
+	struct o2hb_callback_func *f;
 	struct o2hb_callback *hbcall;
 	int ret;
 
@@ -2540,10 +2537,9 @@
 
 	down_write(&o2hb_callback_sem);
 
-	list_for_each(iter, &hbcall->list) {
-		tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
-		if (hc->hc_priority < tmp->hc_priority) {
-			list_add_tail(&hc->hc_item, iter);
+	list_for_each_entry(f, &hbcall->list, hc_item) {
+		if (hc->hc_priority < f->hc_priority) {
+			list_add_tail(&hc->hc_item, &f->hc_item);
 			break;
 		}
 	}
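
This hunk and the dlm hunks below are one mechanical conversion:
list_for_each() plus an explicit list_entry() becomes the typed
list_for_each_entry(), dropping the struct list_head *iter temporary. A
compact userspace re-implementation of the macros involved (GCC/Clang
typeof required), showing the two forms side by side:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_for_each(pos, head) \
    for (pos = (head)->next; pos != (head); pos = pos->next)

#define list_for_each_entry(pos, head, member)                      \
    for (pos = list_entry((head)->next, typeof(*pos), member);      \
         &pos->member != (head);                                    \
         pos = list_entry(pos->member.next, typeof(*pos), member))

struct item { int val; struct list_head node; };

int main(void)
{
    struct list_head head = { &head, &head };
    struct item a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };
    struct list_head *iter;
    struct item *it;

    /* wire up: head -> a -> b -> head */
    head.next = &a.node; a.node.prev = &head;
    a.node.next = &b.node; b.node.prev = &a.node;
    b.node.next = &head; head.prev = &b.node;

    /* old style: raw iterator plus an explicit list_entry() */
    list_for_each(iter, &head) {
        it = list_entry(iter, struct item, node);
        printf("%d ", it->val);
    }

    /* new style: the iterator variable is already the typed entry */
    list_for_each_entry(it, &head, node)
        printf("%d ", it->val);
    printf("\n");
    return 0;
}
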
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index d644dc6..2cd2406 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -543,8 +543,9 @@
 	}
 
 	if (was_valid && !valid) {
-		printk(KERN_NOTICE "o2net: No longer connected to "
-		       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
+		if (old_sc)
+			printk(KERN_NOTICE "o2net: No longer connected to "
+				SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
 		o2net_complete_nodes_nsw(nn);
 	}
 
@@ -765,32 +766,32 @@
 o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
 			  struct rb_node **ret_parent)
 {
-        struct rb_node **p = &o2net_handler_tree.rb_node;
-        struct rb_node *parent = NULL;
+	struct rb_node **p = &o2net_handler_tree.rb_node;
+	struct rb_node *parent = NULL;
 	struct o2net_msg_handler *nmh, *ret = NULL;
 	int cmp;
 
-        while (*p) {
-                parent = *p;
-                nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
+	while (*p) {
+		parent = *p;
+		nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
 		cmp = o2net_handler_cmp(nmh, msg_type, key);
 
-                if (cmp < 0)
-                        p = &(*p)->rb_left;
-                else if (cmp > 0)
-                        p = &(*p)->rb_right;
-                else {
+		if (cmp < 0)
+			p = &(*p)->rb_left;
+		else if (cmp > 0)
+			p = &(*p)->rb_right;
+		else {
 			ret = nmh;
-                        break;
+			break;
 		}
-        }
+	}
 
-        if (ret_p != NULL)
-                *ret_p = p;
-        if (ret_parent != NULL)
-                *ret_parent = parent;
+	if (ret_p != NULL)
+		*ret_p = p;
+	if (ret_parent != NULL)
+		*ret_parent = parent;
 
-        return ret;
+	return ret;
 }
 
 static void o2net_handler_kref_release(struct kref *kref)
@@ -1695,13 +1696,12 @@
 		ret = 0;
 
 out:
-	if (ret) {
+	if (ret && sc) {
 		printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
 		       " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
 		/* 0 err so that another will be queued and attempted
 		 * from set_nn_state */
-		if (sc)
-			o2net_ensure_shutdown(nn, sc, 0);
+		o2net_ensure_shutdown(nn, sc, 0);
 	}
 	if (sc)
 		sc_put(sc);
@@ -1873,12 +1873,16 @@
 
 	if (o2nm_this_node() >= node->nd_num) {
 		local_node = o2nm_get_node_by_num(o2nm_this_node());
-		printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
-		       "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
-		       "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
-		       &(local_node->nd_ipv4_address),
-		       ntohs(local_node->nd_ipv4_port), node->nd_name,
-		       node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+		if (local_node)
+			printk(KERN_NOTICE "o2net: Unexpected connect attempt "
+					"seen at node '%s' (%u, %pI4:%d) from "
+					"node '%s' (%u, %pI4:%d)\n",
+					local_node->nd_name, local_node->nd_num,
+					&(local_node->nd_ipv4_address),
+					ntohs(local_node->nd_ipv4_port),
+					node->nd_name,
+					node->nd_num, &sin.sin_addr.s_addr,
+					ntohs(sin.sin_port));
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index fbec0be..b46278f 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -292,7 +292,7 @@
 	struct dlm_lock *lock = NULL;
 	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
 	char *name;
-	struct list_head *iter, *head=NULL;
+	struct list_head *head = NULL;
 	__be64 cookie;
 	u32 flags;
 	u8 node;
@@ -373,8 +373,7 @@
 	/* try convert queue for both ast/bast */
 	head = &res->converting;
 	lock = NULL;
-	list_for_each(iter, head) {
-		lock = list_entry (iter, struct dlm_lock, list);
+	list_for_each_entry(lock, head, list) {
 		if (lock->ml.cookie == cookie)
 			goto do_ast;
 	}
@@ -385,8 +384,7 @@
 	else
 		head = &res->granted;
 
-	list_for_each(iter, head) {
-		lock = list_entry (iter, struct dlm_lock, list);
+	list_for_each_entry(lock, head, list) {
 		if (lock->ml.cookie == cookie)
 			goto do_ast;
 	}
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index de854cc..e051776 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1079,11 +1079,9 @@
 static inline int dlm_lock_on_list(struct list_head *head,
 				   struct dlm_lock *lock)
 {
-	struct list_head *iter;
 	struct dlm_lock *tmplock;
 
-	list_for_each(iter, head) {
-		tmplock = list_entry(iter, struct dlm_lock, list);
+	list_for_each_entry(tmplock, head, list) {
 		if (tmplock == lock)
 			return 1;
 	}
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 29a886d..e36d63f 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -123,7 +123,6 @@
 					   int *kick_thread)
 {
 	enum dlm_status status = DLM_NORMAL;
-	struct list_head *iter;
 	struct dlm_lock *tmplock=NULL;
 
 	assert_spin_locked(&res->spinlock);
@@ -185,16 +184,14 @@
 
 	/* upconvert from here on */
 	status = DLM_NORMAL;
-	list_for_each(iter, &res->granted) {
-		tmplock = list_entry(iter, struct dlm_lock, list);
+	list_for_each_entry(tmplock, &res->granted, list) {
 		if (tmplock == lock)
 			continue;
 		if (!dlm_lock_compatible(tmplock->ml.type, type))
 			goto switch_queues;
 	}
 
-	list_for_each(iter, &res->converting) {
-		tmplock = list_entry(iter, struct dlm_lock, list);
+	list_for_each_entry(tmplock, &res->converting, list) {
 		if (!dlm_lock_compatible(tmplock->ml.type, type))
 			goto switch_queues;
 		/* existing conversion requests take precedence */
@@ -424,8 +421,8 @@
 	struct dlm_ctxt *dlm = data;
 	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
 	struct dlm_lock_resource *res = NULL;
-	struct list_head *iter;
 	struct dlm_lock *lock = NULL;
+	struct dlm_lock *tmp_lock;
 	struct dlm_lockstatus *lksb;
 	enum dlm_status status = DLM_NORMAL;
 	u32 flags;
@@ -471,14 +468,13 @@
 		dlm_error(status);
 		goto leave;
 	}
-	list_for_each(iter, &res->granted) {
-		lock = list_entry(iter, struct dlm_lock, list);
-		if (lock->ml.cookie == cnv->cookie &&
-		    lock->ml.node == cnv->node_idx) {
+	list_for_each_entry(tmp_lock, &res->granted, list) {
+		if (tmp_lock->ml.cookie == cnv->cookie &&
+		    tmp_lock->ml.node == cnv->node_idx) {
+			lock = tmp_lock;
 			dlm_lock_get(lock);
 			break;
 		}
-		lock = NULL;
 	}
 	spin_unlock(&res->spinlock);
 	if (!lock) {
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 0e28e24..e33cd7a 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -96,7 +96,6 @@
 
 void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
-	struct list_head *iter2;
 	struct dlm_lock *lock;
 	char buf[DLM_LOCKID_NAME_MAX];
 
@@ -118,18 +117,15 @@
 	       res->inflight_locks, atomic_read(&res->asts_reserved));
 	dlm_print_lockres_refmap(res);
 	printk("  granted queue:\n");
-	list_for_each(iter2, &res->granted) {
-		lock = list_entry(iter2, struct dlm_lock, list);
+	list_for_each_entry(lock, &res->granted, list) {
 		__dlm_print_lock(lock);
 	}
 	printk("  converting queue:\n");
-	list_for_each(iter2, &res->converting) {
-		lock = list_entry(iter2, struct dlm_lock, list);
+	list_for_each_entry(lock, &res->converting, list) {
 		__dlm_print_lock(lock);
 	}
 	printk("  blocked queue:\n");
-	list_for_each(iter2, &res->blocked) {
-		lock = list_entry(iter2, struct dlm_lock, list);
+	list_for_each_entry(lock, &res->blocked, list) {
 		__dlm_print_lock(lock);
 	}
 }
@@ -446,7 +442,6 @@
 {
 	struct dlm_master_list_entry *mle;
 	struct hlist_head *bucket;
-	struct hlist_node *list;
 	int i, out = 0;
 	unsigned long total = 0, longest = 0, bucket_count = 0;
 
@@ -456,9 +451,7 @@
 	spin_lock(&dlm->master_lock);
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_master_hash(dlm, i);
-		hlist_for_each(list, bucket) {
-			mle = hlist_entry(list, struct dlm_master_list_entry,
-					  master_hash_node);
+		hlist_for_each_entry(mle, bucket, master_hash_node) {
 			++total;
 			++bucket_count;
 			if (len - out < 200)
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index dbb17c0..8b3382a 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -193,7 +193,7 @@
 						     unsigned int hash)
 {
 	struct hlist_head *bucket;
-	struct hlist_node *list;
+	struct dlm_lock_resource *res;
 
 	mlog(0, "%.*s\n", len, name);
 
@@ -201,9 +201,7 @@
 
 	bucket = dlm_lockres_hash(dlm, hash);
 
-	hlist_for_each(list, bucket) {
-		struct dlm_lock_resource *res = hlist_entry(list,
-			struct dlm_lock_resource, hash_node);
+	hlist_for_each_entry(res, bucket, hash_node) {
 		if (res->lockname.name[0] != name[0])
 			continue;
 		if (unlikely(res->lockname.len != len))
@@ -262,22 +260,19 @@
 
 static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
 {
-	struct dlm_ctxt *tmp = NULL;
-	struct list_head *iter;
+	struct dlm_ctxt *tmp;
 
 	assert_spin_locked(&dlm_domain_lock);
 
 	/* tmp->name here is always NULL terminated,
 	 * but domain may not be! */
-	list_for_each(iter, &dlm_domains) {
-		tmp = list_entry (iter, struct dlm_ctxt, list);
+	list_for_each_entry(tmp, &dlm_domains, list) {
 		if (strlen(tmp->name) == len &&
 		    memcmp(tmp->name, domain, len)==0)
-			break;
-		tmp = NULL;
+			return tmp;
 	}
 
-	return tmp;
+	return NULL;
 }
 
 /* For null terminated domain strings ONLY */
@@ -366,25 +361,22 @@
  * you shouldn't trust your pointer. */
 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
 {
-	struct list_head *iter;
-	struct dlm_ctxt *target = NULL;
+	struct dlm_ctxt *target;
+	struct dlm_ctxt *ret = NULL;
 
 	spin_lock(&dlm_domain_lock);
 
-	list_for_each(iter, &dlm_domains) {
-		target = list_entry (iter, struct dlm_ctxt, list);
-
+	list_for_each_entry(target, &dlm_domains, list) {
 		if (target == dlm) {
 			__dlm_get(target);
+			ret = target;
 			break;
 		}
-
-		target = NULL;
 	}
 
 	spin_unlock(&dlm_domain_lock);
 
-	return target;
+	return ret;
 }
 
 int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
@@ -2296,13 +2288,10 @@
 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
 					int node_num)
 {
-	struct list_head *iter;
 	struct dlm_eviction_cb *cb;
 
 	down_read(&dlm_callback_sem);
-	list_for_each(iter, &dlm->dlm_eviction_callbacks) {
-		cb = list_entry(iter, struct dlm_eviction_cb, ec_item);
-
+	list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
 		cb->ec_func(node_num, cb->ec_data);
 	}
 	up_read(&dlm_callback_sem);
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 47e67c2..5d32f75 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -91,19 +91,14 @@
 static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
 				  struct dlm_lock *lock)
 {
-	struct list_head *iter;
 	struct dlm_lock *tmplock;
 
-	list_for_each(iter, &res->granted) {
-		tmplock = list_entry(iter, struct dlm_lock, list);
-
+	list_for_each_entry(tmplock, &res->granted, list) {
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
 	}
 
-	list_for_each(iter, &res->converting) {
-		tmplock = list_entry(iter, struct dlm_lock, list);
-
+	list_for_each_entry(tmplock, &res->converting, list) {
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
 		if (!dlm_lock_compatible(tmplock->ml.convert_type,
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 33ecbe0..cf0f103 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -342,16 +342,13 @@
 {
 	struct dlm_master_list_entry *tmpmle;
 	struct hlist_head *bucket;
-	struct hlist_node *list;
 	unsigned int hash;
 
 	assert_spin_locked(&dlm->master_lock);
 
 	hash = dlm_lockid_hash(name, namelen);
 	bucket = dlm_master_hash(dlm, hash);
-	hlist_for_each(list, bucket) {
-		tmpmle = hlist_entry(list, struct dlm_master_list_entry,
-				     master_hash_node);
+	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
 		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
 			continue;
 		dlm_get_mle(tmpmle);
@@ -3183,7 +3180,7 @@
 	struct dlm_master_list_entry *mle;
 	struct dlm_lock_resource *res;
 	struct hlist_head *bucket;
-	struct hlist_node *list;
+	struct hlist_node *tmp;
 	unsigned int i;
 
 	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
@@ -3194,10 +3191,7 @@
 	spin_lock(&dlm->master_lock);
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_master_hash(dlm, i);
-		hlist_for_each(list, bucket) {
-			mle = hlist_entry(list, struct dlm_master_list_entry,
-					  master_hash_node);
-
+		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
 			BUG_ON(mle->type != DLM_MLE_BLOCK &&
 			       mle->type != DLM_MLE_MASTER &&
 			       mle->type != DLM_MLE_MIGRATION);
@@ -3378,7 +3372,7 @@
 	int i;
 	struct hlist_head *bucket;
 	struct dlm_master_list_entry *mle;
-	struct hlist_node *tmp, *list;
+	struct hlist_node *tmp;
 
 	/*
 	 * We notified all other nodes that we are exiting the domain and
@@ -3394,9 +3388,7 @@
 
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_master_hash(dlm, i);
-		hlist_for_each_safe(list, tmp, bucket) {
-			mle = hlist_entry(list, struct dlm_master_list_entry,
-					  master_hash_node);
+		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
 			if (mle->type != DLM_MLE_BLOCK) {
 				mlog(ML_ERROR, "bad mle: %p\n", mle);
 				dlm_print_one_mle(mle);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 773bd32..0b5adca 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -787,6 +787,7 @@
 {
 	struct dlm_lock_request lr;
 	int ret;
+	int status;
 
 	mlog(0, "\n");
 
@@ -800,13 +801,15 @@
 
 	// send message
 	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
-				 &lr, sizeof(lr), request_from, NULL);
+				 &lr, sizeof(lr), request_from, &status);
 
 	/* negative status is handled by caller */
 	if (ret < 0)
 		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
 		     "to recover dead node %u\n", dlm->name, ret,
 		     request_from, dead_node);
+	else
+		ret = status;
 	// return from here, then
 	// sleep until all received or error
 	return ret;
@@ -2328,6 +2331,14 @@
 			} else if (res->owner == dlm->node_num) {
 				dlm_free_dead_locks(dlm, res, dead_node);
 				__dlm_lockres_calc_usage(dlm, res);
+			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+				if (test_bit(dead_node, res->refmap)) {
+					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
+						"no locks and had not purged before dying\n",
+						dlm->name, res->lockname.len,
+						res->lockname.name, dead_node);
+					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
+				}
 			}
 			spin_unlock(&res->spinlock);
 		}
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index e73c833..9db869d 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -286,8 +286,6 @@
 			      struct dlm_lock_resource *res)
 {
 	struct dlm_lock *lock, *target;
-	struct list_head *iter;
-	struct list_head *head;
 	int can_grant = 1;
 
 	/*
@@ -314,9 +312,7 @@
 		     dlm->name, res->lockname.len, res->lockname.name);
 		BUG();
 	}
-	head = &res->granted;
-	list_for_each(iter, head) {
-		lock = list_entry(iter, struct dlm_lock, list);
+	list_for_each_entry(lock, &res->granted, list) {
 		if (lock==target)
 			continue;
 		if (!dlm_lock_compatible(lock->ml.type,
@@ -333,9 +329,8 @@
 					target->ml.convert_type;
 		}
 	}
-	head = &res->converting;
-	list_for_each(iter, head) {
-		lock = list_entry(iter, struct dlm_lock, list);
+
+	list_for_each_entry(lock, &res->converting, list) {
 		if (lock==target)
 			continue;
 		if (!dlm_lock_compatible(lock->ml.type,
@@ -384,9 +379,7 @@
 		goto leave;
 	target = list_entry(res->blocked.next, struct dlm_lock, list);
 
-	head = &res->granted;
-	list_for_each(iter, head) {
-		lock = list_entry(iter, struct dlm_lock, list);
+	list_for_each_entry(lock, &res->granted, list) {
 		if (lock==target)
 			continue;
 		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
@@ -400,9 +393,7 @@
 		}
 	}
 
-	head = &res->converting;
-	list_for_each(iter, head) {
-		lock = list_entry(iter, struct dlm_lock, list);
+	list_for_each_entry(lock, &res->converting, list) {
 		if (lock==target)
 			continue;
 		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 850aa7e..5698b52 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -388,7 +388,6 @@
 	struct dlm_ctxt *dlm = data;
 	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
 	struct dlm_lock_resource *res = NULL;
-	struct list_head *iter;
 	struct dlm_lock *lock = NULL;
 	enum dlm_status status = DLM_NORMAL;
 	int found = 0, i;
@@ -458,8 +457,7 @@
 	}
 
 	for (i=0; i<3; i++) {
-		list_for_each(iter, queue) {
-			lock = list_entry(iter, struct dlm_lock, list);
+		list_for_each_entry(lock, queue, list) {
 			if (lock->ml.cookie == unlock->cookie &&
 		    	    lock->ml.node == unlock->node_idx) {
 				dlm_lock_get(lock);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 12bafb7..efa2b3d 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -401,11 +401,8 @@
 {
 	struct inode *inode = new_inode(sb);
 	umode_t mode = S_IFDIR | 0755;
-	struct dlmfs_inode_private *ip;
 
 	if (inode) {
-		ip = DLMFS_I(inode);
-
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, NULL, mode);
 		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 2487116..767370b 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -781,7 +781,6 @@
 	cpos = map_start >> osb->s_clustersize_bits;
 	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
 					       map_start + map_len);
-	mapping_end -= cpos;
 	is_last = 0;
 	while (cpos < mapping_end && !is_last) {
 		u32 fe_flags;
@@ -852,20 +851,20 @@
 
 	down_read(&OCFS2_I(inode)->ip_alloc_sem);
 
-	if (*offset >= inode->i_size) {
+	if (*offset >= i_size_read(inode)) {
 		ret = -ENXIO;
 		goto out_unlock;
 	}
 
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 		if (whence == SEEK_HOLE)
-			*offset = inode->i_size;
+			*offset = i_size_read(inode);
 		goto out_unlock;
 	}
 
 	clen = 0;
 	cpos = *offset >> cs_bits;
-	cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+	cend = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
 
 	while (cpos < cend && !is_last) {
 		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
@@ -904,8 +903,8 @@
 		extlen = clen;
 		extlen <<=  cs_bits;
 
-		if ((extoff + extlen) > inode->i_size)
-			extlen = inode->i_size - extoff;
+		if ((extoff + extlen) > i_size_read(inode))
+			extlen = i_size_read(inode) - extoff;
 		extoff += extlen;
 		if (extoff > *offset)
 			*offset = extoff;
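
Several hunks in this series, starting with extent_map.c above, replace direct
inode->i_size reads with i_size_read(). The reason, sketched under the usual
<linux/fs.h> semantics: on 32-bit SMP kernels i_size is a 64-bit field guarded
by a seqcount, so lockless readers need the accessor to avoid torn loads (on
64-bit it compiles down to a plain load):

	#include <linux/fs.h>

	/* Sketch: always read i_size through the accessor on lockless paths. */
	static int offset_past_eof(struct inode *inode, loff_t offset)
	{
		return offset >= i_size_read(inode);
	}
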
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3261d71..4f8197c 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -671,11 +671,7 @@
 		} else {
 			BUG_ON(why != RESTART_TRANS);
 
-			/* TODO: This can be more intelligent. */
-			credits = ocfs2_calc_extend_credits(osb->sb,
-							    &fe->id2.i_list,
-							    clusters_to_add);
-			status = ocfs2_extend_trans(handle, credits);
+			status = ocfs2_allocate_extend_trans(handle, 1);
 			if (status < 0) {
 				/* handle still has to be committed at
 				 * this point. */
@@ -1800,6 +1796,7 @@
 	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
 
 out:
+	ocfs2_free_path(path);
 	ocfs2_schedule_truncate_log_flush(osb, 1);
 	ocfs2_run_deallocs(osb, &dealloc);
 
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 0c60ef2..fa32ce9 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -303,7 +303,7 @@
 	if (o2info_from_user(oij, req))
 		goto bail;
 
-	oij.ij_journal_size = osb->journal->j_inode->i_size;
+	oij.ij_journal_size = i_size_read(osb->journal->j_inode);
 
 	o2info_set_request_filled(&oij.ij_req);
 
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 242170d..44fc3e5 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -455,6 +455,41 @@
 	return status;
 }
 
+/*
+ * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
+ * If that fails, restart the transaction & regain write access for the
+ * buffer head which is used for metadata modifications.
+ * Taken from Ext4: extend_or_restart_transaction()
+ */
+int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
+{
+	int status, old_nblks;
+
+	BUG_ON(!handle);
+
+	old_nblks = handle->h_buffer_credits;
+	trace_ocfs2_allocate_extend_trans(old_nblks, thresh);
+
+	if (old_nblks < thresh)
+		return 0;
+
+	status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	if (status > 0) {
+		status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA);
+		if (status < 0)
+			mlog_errno(status);
+	}
+
+bail:
+	return status;
+}
+
+
 struct ocfs2_triggers {
 	struct jbd2_buffer_trigger_type	ot_triggers;
 	int				ot_offset;
@@ -801,14 +836,14 @@
 	inode_lock = 1;
 	di = (struct ocfs2_dinode *)bh->b_data;
 
-	if (inode->i_size <  OCFS2_MIN_JOURNAL_SIZE) {
+	if (i_size_read(inode) <  OCFS2_MIN_JOURNAL_SIZE) {
 		mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
-		     inode->i_size);
+		     i_size_read(inode));
 		status = -EINVAL;
 		goto done;
 	}
 
-	trace_ocfs2_journal_init(inode->i_size,
+	trace_ocfs2_journal_init(i_size_read(inode),
 				 (unsigned long long)inode->i_blocks,
 				 OCFS2_I(inode)->ip_clusters);
 
@@ -1096,7 +1131,7 @@
 
 	memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
 
-	num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
+	num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
 	v_blkno = 0;
 	while (v_blkno < num_blocks) {
 		status = ocfs2_extent_map_get_blocks(inode, v_blkno,
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 0a99273..0b479ba 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -258,6 +258,17 @@
 int			     ocfs2_commit_trans(struct ocfs2_super *osb,
 						handle_t *handle);
 int			     ocfs2_extend_trans(handle_t *handle, int nblocks);
+int			     ocfs2_allocate_extend_trans(handle_t *handle,
+						int thresh);
+
+/*
+ * Define an arbitrary limit for the amount of data we will anticipate
+ * writing to any given transaction.  For unbounded transactions such as
+ * fallocate(2) we can write more than this, but we always
+ * start off at the maximum transaction size and grow the transaction
+ * optimistically as we go.
+ */
+#define OCFS2_MAX_TRANS_DATA	64U
 
 /*
  * Create access is for when we get a newly created buffer and we're
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index aebeacd..cd5496b 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -1082,7 +1082,7 @@
 	}
 
 retry_enospc:
-	(*ac)->ac_bits_wanted = osb->local_alloc_default_bits;
+	(*ac)->ac_bits_wanted = osb->local_alloc_bits;
 	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
 	if (status == -ENOSPC) {
 		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
@@ -1154,7 +1154,7 @@
 		    OCFS2_LA_DISABLED)
 			goto bail;
 
-		ac->ac_bits_wanted = osb->local_alloc_default_bits;
+		ac->ac_bits_wanted = osb->local_alloc_bits;
 		status = ocfs2_claim_clusters(handle, ac,
 					      osb->local_alloc_bits,
 					      &cluster_off,
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 452068b..3d3f3c8 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -152,6 +152,7 @@
 	}
 
 out:
+	ocfs2_free_path(path);
 	return ret;
 }
 
@@ -845,7 +846,7 @@
 	struct ocfs2_move_extents *range = context->range;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
-	if ((inode->i_size == 0) || (range->me_len == 0))
+	if ((i_size_read(inode) == 0) || (range->me_len == 0))
 		return 0;
 
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 3b481f4..1b60c62 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -2579,6 +2579,8 @@
 
 DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
 
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
+
 DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access);
 
 DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 332a281..aaa5061 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -234,7 +234,7 @@
 		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
 	}
 
-	if (gqinode->i_size < off + len) {
+	if (i_size_read(gqinode) < off + len) {
 		loff_t rounded_end =
 				ocfs2_align_bytes_to_blocks(sb, off + len);
 
@@ -778,8 +778,8 @@
 		 */
 		WARN_ON(journal_current_handle());
 		status = ocfs2_extend_no_holes(gqinode, NULL,
-			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
-			gqinode->i_size);
+			i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
+			i_size_read(gqinode));
 		if (status < 0)
 			goto out_dq;
 	}
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 27fe7ee..2e4344b 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -982,14 +982,14 @@
 
 	/* We are protected by dqio_sem so no locking needed */
 	status = ocfs2_extend_no_holes(lqinode, NULL,
-				       lqinode->i_size + 2 * sb->s_blocksize,
-				       lqinode->i_size);
+				       i_size_read(lqinode) + 2 * sb->s_blocksize,
+				       i_size_read(lqinode));
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
 	}
 	status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh,
-					  lqinode->i_size + 2 * sb->s_blocksize);
+					  i_size_read(lqinode) + 2 * sb->s_blocksize);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
@@ -1125,14 +1125,14 @@
 
 	/* We are protected by dqio_sem so no locking needed */
 	status = ocfs2_extend_no_holes(lqinode, NULL,
-				       lqinode->i_size + sb->s_blocksize,
-				       lqinode->i_size);
+				       i_size_read(lqinode) + sb->s_blocksize,
+				       i_size_read(lqinode));
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
 	}
 	status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh,
-					  lqinode->i_size + sb->s_blocksize);
+					  i_size_read(lqinode) + sb->s_blocksize);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index a70d604..bf4dfc1 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3854,7 +3854,10 @@
 	while (cpos < clusters) {
 		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
 					 &num_clusters, &ext_flags);
-
+		if (ret) {
+			mlog_errno(ret);
+			goto unlock;
+		}
 		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
 			ret = ocfs2_add_refcount_flag(inode, &di_et,
 						      &ref_tree->rf_ci,
@@ -4025,7 +4028,10 @@
 	while (cpos < clusters) {
 		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
 					 &num_clusters, &ext_flags);
-
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
 		if (p_cluster) {
 			ret = ocfs2_add_refcounted_extent(t_inode, &et,
 							  ref_ci, ref_root_bh,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 317ef0a..6ce0686 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3505,7 +3505,7 @@
 	int ret, credits, ref_meta = 0, ref_credits = 0;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct inode *tl_inode = osb->osb_tl_inode;
-	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
+	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
 	struct ocfs2_refcount_tree *ref_tree = NULL;
 
 	struct ocfs2_xattr_info xi = {
@@ -3609,13 +3609,14 @@
 	if (IS_ERR(ctxt.handle)) {
 		ret = PTR_ERR(ctxt.handle);
 		mlog_errno(ret);
-		goto cleanup;
+		goto out_free_ac;
 	}
 
 	ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
 
 	ocfs2_commit_trans(osb, ctxt.handle);
 
+out_free_ac:
 	if (ctxt.data_ac)
 		ocfs2_free_alloc_context(ctxt.data_ac);
 	if (ctxt.meta_ac)
@@ -5881,6 +5882,10 @@
 	while (cpos < clusters) {
 		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
 					       &num_clusters, el, &ext_flags);
+		if (ret) {
+			mlog_errno(ret);
+			break;
+		}
 
 		cpos += num_clusters;
 		if ((ext_flags & OCFS2_EXT_REFCOUNTED))
@@ -6797,7 +6802,7 @@
 	if (ret) {
 		if (*meta_ac) {
 			ocfs2_free_alloc_context(*meta_ac);
-			meta_ac = NULL;
+			*meta_ac = NULL;
 		}
 	}
 
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 0ff80f9..985ea88 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -286,7 +286,7 @@
 	int rv = generic_permission(inode, mask);
 	if (rv == 0)
 		return 0;
-	if (task_pid(current) == proc_pid(inode))
+	if (task_tgid(current) == proc_pid(inode))
 		rv = 0;
 	return rv;
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 107d026..7366e9d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -740,6 +740,9 @@
 		ptent = pte_file_clear_soft_dirty(ptent);
 	}
 
+	if (vma->vm_flags & VM_SOFTDIRTY)
+		vma->vm_flags &= ~VM_SOFTDIRTY;
+
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
@@ -949,13 +952,15 @@
 		if (is_migration_entry(entry))
 			page = migration_entry_to_page(entry);
 	} else {
-		*pme = make_pme(PM_NOT_PRESENT(pm->v2));
+		if (vma->vm_flags & VM_SOFTDIRTY)
+			flags2 |= __PM_SOFT_DIRTY;
+		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
 		return;
 	}
 
 	if (page && !PageAnon(page))
 		flags |= PM_FILE;
-	if (pte_soft_dirty(pte))
+	if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
 		flags2 |= __PM_SOFT_DIRTY;
 
 	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
@@ -974,7 +979,7 @@
 		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
 				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
 	else
-		*pme = make_pme(PM_NOT_PRESENT(pm->v2));
+		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
 }
 #else
 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
@@ -997,7 +1002,11 @@
 	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
 		int pmd_flags2;
 
-		pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0);
+		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
+			pmd_flags2 = __PM_SOFT_DIRTY;
+		else
+			pmd_flags2 = 0;
+
 		for (; addr != end; addr += PAGE_SIZE) {
 			unsigned long offset;
 
@@ -1015,12 +1024,17 @@
 	if (pmd_trans_unstable(pmd))
 		return 0;
 	for (; addr != end; addr += PAGE_SIZE) {
+		int flags2;
 
 		/* check to see if we've left 'vma' behind
 		 * and need a new, higher one */
 		if (vma && (addr >= vma->vm_end)) {
 			vma = find_vma(walk->mm, addr);
-			pme = make_pme(PM_NOT_PRESENT(pm->v2));
+			if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+				flags2 = __PM_SOFT_DIRTY;
+			else
+				flags2 = 0;
+			pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
 		}
 
 		/* check that 'vma' actually covers this address,
@@ -1044,13 +1058,15 @@
 
 #ifdef CONFIG_HUGETLB_PAGE
 static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
-					pte_t pte, int offset)
+					pte_t pte, int offset, int flags2)
 {
 	if (pte_present(pte))
-		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
-				| PM_STATUS2(pm->v2, 0) | PM_PRESENT);
+		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
+				PM_STATUS2(pm->v2, flags2)		|
+				PM_PRESENT);
 	else
-		*pme = make_pme(PM_NOT_PRESENT(pm->v2));
+		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
+				PM_STATUS2(pm->v2, flags2));
 }
 
 /* This function walks within one hugetlb entry in the single call */
@@ -1059,12 +1075,22 @@
 				 struct mm_walk *walk)
 {
 	struct pagemapread *pm = walk->private;
+	struct vm_area_struct *vma;
 	int err = 0;
+	int flags2;
 	pagemap_entry_t pme;
 
+	vma = find_vma(walk->mm, addr);
+	WARN_ON_ONCE(!vma);
+
+	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+		flags2 = __PM_SOFT_DIRTY;
+	else
+		flags2 = 0;
+
 	for (; addr != end; addr += PAGE_SIZE) {
 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
-		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset);
+		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
 		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
 			return err;
@@ -1376,8 +1402,10 @@
 	walk.mm = mm;
 
 	pol = get_vma_policy(task, vma, vma->vm_start);
-	mpol_to_str(buffer, sizeof(buffer), pol);
+	n = mpol_to_str(buffer, sizeof(buffer), pol);
 	mpol_cond_put(pol);
+	if (n < 0)
+		return n;
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index a1a16eb..9100d69 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -21,6 +21,7 @@
 #include <linux/crash_dump.h>
 #include <linux/list.h>
 #include <linux/vmalloc.h>
+#include <linux/pagemap.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include "internal.h"
@@ -123,11 +124,65 @@
 	return read;
 }
 
+/*
+ * Architectures may override this function to allocate the ELF header in the 2nd kernel
+ */
+int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
+{
+	return 0;
+}
+
+/*
+ * Architectures may override this function to free header
+ */
+void __weak elfcorehdr_free(unsigned long long addr)
+{}
+
+/*
+ * Architectures may override this function to read from the ELF header
+ */
+ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	return read_from_oldmem(buf, count, ppos, 0);
+}
+
+/*
+ * Architectures may override this function to read from notes sections
+ */
+ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+{
+	return read_from_oldmem(buf, count, ppos, 0);
+}
+
+/*
+ * Architectures may override this function to map oldmem
+ */
+int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+				  unsigned long from, unsigned long pfn,
+				  unsigned long size, pgprot_t prot)
+{
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
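
Because the elfcorehdr_*() and remap_oldmem_pfn_range() defaults above are
__weak, an architecture takes them over simply by providing a strong
definition with the same signature; the linker then discards the generic
version. A hypothetical override, for illustration only (s390 is the intended
consumer here; my_arch_copy_elfcorehdr() is an invented name):

	/* Strong definition: overrides the __weak default at link time. */
	ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
	{
		/* e.g. serve the ELF header the 2nd kernel built itself,
		 * rather than reading it back out of oldmem */
		return my_arch_copy_elfcorehdr(buf, count, ppos);
	}
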
+/*
+ * Copy to either kernel or user space
+ */
+static int copy_to(void *target, void *src, size_t size, int userbuf)
+{
+	if (userbuf) {
+		if (copy_to_user((char __user *) target, src, size))
+			return -EFAULT;
+	} else {
+		memcpy(target, src, size);
+	}
+	return 0;
+}
+
 /* Read from the ELF header and then the crash dump. On error, a negative
  * value is returned; otherwise the number of bytes read is returned.
  */
-static ssize_t read_vmcore(struct file *file, char __user *buffer,
-				size_t buflen, loff_t *fpos)
+static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
+			     int userbuf)
 {
 	ssize_t acc = 0, tmp;
 	size_t tsz;
@@ -144,7 +199,7 @@
 	/* Read ELF core header */
 	if (*fpos < elfcorebuf_sz) {
 		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
-		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
+		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
 			return -EFAULT;
 		buflen -= tsz;
 		*fpos += tsz;
@@ -162,7 +217,7 @@
 
 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
 		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
-		if (copy_to_user(buffer, kaddr, tsz))
+		if (copy_to(buffer, kaddr, tsz, userbuf))
 			return -EFAULT;
 		buflen -= tsz;
 		*fpos += tsz;
@@ -178,7 +233,7 @@
 		if (*fpos < m->offset + m->size) {
 			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
 			start = m->paddr + *fpos - m->offset;
-			tmp = read_from_oldmem(buffer, tsz, &start, 1);
+			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
 			if (tmp < 0)
 				return tmp;
 			buflen -= tsz;
@@ -195,6 +250,55 @@
 	return acc;
 }
 
+static ssize_t read_vmcore(struct file *file, char __user *buffer,
+			   size_t buflen, loff_t *fpos)
+{
+	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+}
+
+/*
+ * The vmcore fault handler uses the page cache and fills data using the
+ * standard __vmcore_read() function.
+ *
+ * On s390 the fault handler is used for memory regions that can't be mapped
+ * directly with remap_pfn_range().
+ */
+static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#ifdef CONFIG_S390
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	pgoff_t index = vmf->pgoff;
+	struct page *page;
+	loff_t offset;
+	char *buf;
+	int rc;
+
+	page = find_or_create_page(mapping, index, GFP_KERNEL);
+	if (!page)
+		return VM_FAULT_OOM;
+	if (!PageUptodate(page)) {
+		offset = (loff_t) index << PAGE_CACHE_SHIFT;
+		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
+		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
+		if (rc < 0) {
+			unlock_page(page);
+			page_cache_release(page);
+			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+		}
+		SetPageUptodate(page);
+	}
+	unlock_page(page);
+	vmf->page = page;
+	return 0;
+#else
+	return VM_FAULT_SIGBUS;
+#endif
+}
+
+static const struct vm_operations_struct vmcore_mmap_ops = {
+	.fault = mmap_vmcore_fault,
+};
+
 /**
  * alloc_elfnotes_buf - allocate buffer for ELF note segment in
  *                      vmalloc memory
@@ -223,7 +327,7 @@
  * regions in the 1st kernel pointed to by PT_LOAD entries) into
  * virtually contiguous user-space in ELF layout.
  */
-#if defined(CONFIG_MMU) && !defined(CONFIG_S390)
+#ifdef CONFIG_MMU
 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 {
 	size_t size = vma->vm_end - vma->vm_start;
@@ -241,6 +345,7 @@
 
 	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_ops = &vmcore_mmap_ops;
 
 	len = 0;
 
@@ -282,9 +387,9 @@
 
 			tsz = min_t(size_t, m->offset + m->size - start, size);
 			paddr = m->paddr + start - m->offset;
-			if (remap_pfn_range(vma, vma->vm_start + len,
-					    paddr >> PAGE_SHIFT, tsz,
-					    vma->vm_page_prot))
+			if (remap_oldmem_pfn_range(vma, vma->vm_start + len,
+						   paddr >> PAGE_SHIFT, tsz,
+						   vma->vm_page_prot))
 				goto fail;
 			size -= tsz;
 			start += tsz;
@@ -357,7 +462,7 @@
 		notes_section = kmalloc(max_sz, GFP_KERNEL);
 		if (!notes_section)
 			return -ENOMEM;
-		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
+		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 		if (rc < 0) {
 			kfree(notes_section);
 			return rc;
@@ -444,7 +549,8 @@
 		if (phdr_ptr->p_type != PT_NOTE)
 			continue;
 		offset = phdr_ptr->p_offset;
-		rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
+		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
+					   &offset);
 		if (rc < 0)
 			return rc;
 		notes_buf += phdr_ptr->p_memsz;
@@ -536,7 +642,7 @@
 		notes_section = kmalloc(max_sz, GFP_KERNEL);
 		if (!notes_section)
 			return -ENOMEM;
-		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
+		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 		if (rc < 0) {
 			kfree(notes_section);
 			return rc;
@@ -623,7 +729,8 @@
 		if (phdr_ptr->p_type != PT_NOTE)
 			continue;
 		offset = phdr_ptr->p_offset;
-		rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
+		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
+					   &offset);
 		if (rc < 0)
 			return rc;
 		notes_buf += phdr_ptr->p_memsz;
@@ -810,7 +917,7 @@
 	addr = elfcorehdr_addr;
 
 	/* Read Elf header */
-	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
+	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
 	if (rc < 0)
 		return rc;
 
@@ -837,7 +944,7 @@
 	if (!elfcorebuf)
 		return -ENOMEM;
 	addr = elfcorehdr_addr;
-	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
+	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
 	if (rc < 0)
 		goto fail;
 
@@ -866,7 +973,7 @@
 	addr = elfcorehdr_addr;
 
 	/* Read Elf header */
-	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
+	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
 	if (rc < 0)
 		return rc;
 
@@ -892,7 +999,7 @@
 	if (!elfcorebuf)
 		return -ENOMEM;
 	addr = elfcorehdr_addr;
-	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
+	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
 	if (rc < 0)
 		goto fail;
 
@@ -919,7 +1026,7 @@
 	int rc=0;
 
 	addr = elfcorehdr_addr;
-	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
+	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
 	if (rc < 0)
 		return rc;
 	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
@@ -952,7 +1059,14 @@
 {
 	int rc = 0;
 
-	/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
+	/* Allow architectures to allocate ELF header in 2nd kernel */
+	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
+	if (rc)
+		return rc;
+	/*
+	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
+	 * then capture the dump.
+	 */
 	if (!(is_vmcore_usable()))
 		return rc;
 	rc = parse_crash_elf_headers();
@@ -960,6 +1074,8 @@
 		pr_warn("Kdump: vmcore not initialized\n");
 		return rc;
 	}
+	elfcorehdr_free(elfcorehdr_addr);
+	elfcorehdr_addr = ELFCORE_ADDR_ERR;
 
 	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
 	if (proc_vmcore)
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index c24f1e1..39d1465 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -244,12 +244,6 @@
 	return mount_nodev(fs_type, flags, data, ramfs_fill_super);
 }
 
-static struct dentry *rootfs_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data)
-{
-	return mount_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super);
-}
-
 static void ramfs_kill_sb(struct super_block *sb)
 {
 	kfree(sb->s_fs_info);
@@ -262,29 +256,23 @@
 	.kill_sb	= ramfs_kill_sb,
 	.fs_flags	= FS_USERNS_MOUNT,
 };
-static struct file_system_type rootfs_fs_type = {
-	.name		= "rootfs",
-	.mount		= rootfs_mount,
-	.kill_sb	= kill_litter_super,
-};
 
-static int __init init_ramfs_fs(void)
+int __init init_ramfs_fs(void)
 {
-	return register_filesystem(&ramfs_fs_type);
-}
-module_init(init_ramfs_fs)
-
-int __init init_rootfs(void)
-{
+	static unsigned long once;
 	int err;
 
+	if (test_and_set_bit(0, &once))
+		return 0;
+
 	err = bdi_init(&ramfs_backing_dev_info);
 	if (err)
 		return err;
 
-	err = register_filesystem(&rootfs_fs_type);
+	err = register_filesystem(&ramfs_fs_type);
 	if (err)
 		bdi_destroy(&ramfs_backing_dev_info);
 
 	return err;
 }
+module_init(init_ramfs_fs)
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index fb50652..41d108e 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -167,17 +167,14 @@
 		/*
 		 * Block is uncompressed.
 		 */
-		int i, in, pg_offset = 0;
-
-		for (i = 0; i < b; i++) {
-			wait_on_buffer(bh[i]);
-			if (!buffer_uptodate(bh[i]))
-				goto block_release;
-		}
+		int in, pg_offset = 0;
 
 		for (bytes = length; k < b; k++) {
 			in = min(bytes, msblk->devblksize - offset);
 			bytes -= in;
+			wait_on_buffer(bh[k]);
+			if (!buffer_uptodate(bh[k]))
+				goto block_release;
 			while (in) {
 				if (pg_offset == PAGE_CACHE_SIZE) {
 					page++;
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index f7f527b..d8c2d74 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -54,6 +54,7 @@
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	int err, i, index, length = 0;
+	unsigned int size;
 	struct squashfs_dir_index dir_index;
 
 	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
@@ -81,8 +82,14 @@
 			 */
 			break;
 
+		size = le32_to_cpu(dir_index.size) + 1;
+
+		/* size should never be larger than SQUASHFS_NAME_LEN */
+		if (size > SQUASHFS_NAME_LEN)
+			break;
+
 		err = squashfs_read_metadata(sb, NULL, &index_start,
-				&index_offset, le32_to_cpu(dir_index.size) + 1);
+				&index_offset, size);
 		if (err < 0)
 			break;
 
@@ -105,9 +112,8 @@
 	struct inode *inode = file_inode(file);
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	u64 block = squashfs_i(inode)->start + msblk->directory_table;
-	int offset = squashfs_i(inode)->offset, length, dir_count, size,
-				type, err;
-	unsigned int inode_number;
+	int offset = squashfs_i(inode)->offset, length, err;
+	unsigned int inode_number, dir_count, size, type;
 	struct squashfs_dir_header dirh;
 	struct squashfs_dir_entry *dire;
 
@@ -200,6 +206,9 @@
 				((short) le16_to_cpu(dire->inode_number));
 			type = le16_to_cpu(dire->type);
 
+			if (type > SQUASHFS_MAX_DIR_TYPE)
+				goto failed_read;
+
 			if (!dir_emit(ctx, dire->name, size,
 					inode_number,
 					squashfs_filetype_table[type]))
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 7834a51..67cad77 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -79,7 +79,8 @@
 			int len)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int i, size, length = 0, err;
+	int i, length = 0, err;
+	unsigned int size;
 	struct squashfs_dir_index *index;
 	char *str;
 
@@ -103,6 +104,8 @@
 
 
 		size = le32_to_cpu(index->size) + 1;
+		if (size > SQUASHFS_NAME_LEN)
+			break;
 
 		err = squashfs_read_metadata(sb, index->name, &index_start,
 					&index_offset, size);
@@ -144,7 +147,8 @@
 	struct squashfs_dir_entry *dire;
 	u64 block = squashfs_i(dir)->start + msblk->directory_table;
 	int offset = squashfs_i(dir)->offset;
-	int err, length, dir_count, size;
+	int err, length;
+	unsigned int dir_count, size;
 
 	TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
 
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 9e2349d..4b2beda 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -87,7 +87,7 @@
 #define SQUASHFS_COMP_OPTS(flags)		SQUASHFS_BIT(flags, \
 						SQUASHFS_COMP_OPT)
 
-/* Max number of types and file types */
+/* Inode types including extended types */
 #define SQUASHFS_DIR_TYPE		1
 #define SQUASHFS_REG_TYPE		2
 #define SQUASHFS_SYMLINK_TYPE		3
@@ -103,6 +103,9 @@
 #define SQUASHFS_LFIFO_TYPE		13
 #define SQUASHFS_LSOCKET_TYPE		14
 
+/* Max type value stored in directory entry */
+#define SQUASHFS_MAX_DIR_TYPE		7
+
 /* Xattr types */
 #define SQUASHFS_XATTR_USER             0
 #define SQUASHFS_XATTR_TRUSTED          1
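
The dir.c and namei.c hunks above share one hardening pattern: a length or
type read from on-disk (hence untrusted) metadata is bounds-checked before it
is used as a buffer length or a table index, and the variables are widened
from int to unsigned int so an oversized on-disk value cannot go negative and
slip past the comparison. A condensed sketch, reusing the squashfs_fs.h
limits; the -EIO returns stand in for the real failed_read paths:

	#include "squashfs_fs.h"

	static int check_dir_fields(struct squashfs_dir_index *dir_index,
				    struct squashfs_dir_entry *dire)
	{
		unsigned int size = le32_to_cpu(dir_index->size) + 1;
		unsigned int type = le16_to_cpu(dire->type);

		if (size > SQUASHFS_NAME_LEN)		/* cap before copying a name */
			return -EIO;
		if (type > SQUASHFS_MAX_DIR_TYPE)	/* cap before indexing
							 * squashfs_filetype_table[] */
			return -EIO;
		return 0;
	}
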
diff --git a/fs/super.c b/fs/super.c
index 5536a95..f6961ea 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,7 +71,7 @@
 	if (!grab_super_passive(sb))
 		return -1;
 
-	if (sb->s_op && sb->s_op->nr_cached_objects)
+	if (sb->s_op->nr_cached_objects)
 		fs_objects = sb->s_op->nr_cached_objects(sb);
 
 	total_objects = sb->s_nr_dentry_unused +
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 63d609d..3abfa6e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,6 +26,7 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include <drm/i915_pciids.h>
 #include <uapi/drm/i915_drm.h>
 
 /* For use by IPS driver */
@@ -34,4 +35,37 @@
 extern bool i915_gpu_lower(void);
 extern bool i915_gpu_busy(void);
 extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL		0x52
+#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define    SNB_GMCH_GGMS_MASK	0x3
+#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
+#define    SNB_GMCH_GMS_MASK    0x1f
+
+#define I830_GMCH_CTRL			0x52
+
+#define I855_GMCH_GMS_MASK		0xF0
+#define I855_GMCH_GMS_STOLEN_0M		0x0
+#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+
 #endif				/* _I915_DRM_H_ */
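
A short sketch of how the SNB_GMCH_* definitions above are meant to be used,
assuming a struct pci_dev for the bridge device is already at hand (this is
roughly what the intel-gtt/i915 setup code does):

	#include <linux/pci.h>

	/* Sketch: decode GTT and stolen-memory sizes from the GMCH control word. */
	static void read_gmch_sizes(struct pci_dev *bridge)
	{
		u16 gmch_ctrl;
		unsigned int ggms, gms;

		pci_read_config_word(bridge, SNB_GMCH_CTRL, &gmch_ctrl);
		ggms = (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
		gms  = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
		/* ggms selects the GTT size, gms the pre-reserved (stolen) memory */
	}
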
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
new file mode 100644
index 0000000..8a10f5c
--- /dev/null
+++ b/include/drm/i915_pciids.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _I915_PCIIDS_H
+#define _I915_PCIIDS_H
+
+/*
+ * A pci_device_id struct {
+ *	__u32 vendor, device;
+ *      __u32 subvendor, subdevice;
+ *	__u32 class, class_mask;
+ *	kernel_ulong_t driver_data;
+ * };
+ * Don't use C99 designated initializers here because "class" is a
+ * reserved word (in C++) and we want to give userspace flexibility.
+ */
+#define INTEL_VGA_DEVICE(id, info) {		\
+	0x8086,	id,				\
+	~0, ~0,					\
+	0x030000, 0xff0000,			\
+	(unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) {		\
+	0x8086,	0x16a,				\
+	0x152d,	0x8990,				\
+	0x030000, 0xff0000,			\
+	(unsigned long) info }
+
+#define INTEL_I830_IDS(info)				\
+	INTEL_VGA_DEVICE(0x3577, info)
+
+#define INTEL_I845G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2562, info)
+
+#define INTEL_I85X_IDS(info)				\
+	INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
+	INTEL_VGA_DEVICE(0x358e, info)
+
+#define INTEL_I865G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
+
+#define INTEL_I915G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
+	INTEL_VGA_DEVICE(0x258a, info)  /* E7221_G */
+
+#define INTEL_I915GM_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
+
+#define INTEL_I945G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
+
+#define INTEL_I945GM_IDS(info)				\
+	INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
+	INTEL_VGA_DEVICE(0x27ae, info)  /* I945_GME */
+
+#define INTEL_I965G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */	\
+	INTEL_VGA_DEVICE(0x2982, info),	/* G35_G */	\
+	INTEL_VGA_DEVICE(0x2992, info),	/* I965_Q */	\
+	INTEL_VGA_DEVICE(0x29a2, info)	/* I965_G */
+
+#define INTEL_G33_IDS(info)				\
+	INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
+	INTEL_VGA_DEVICE(0x29c2, info),	/* G33_G */ \
+	INTEL_VGA_DEVICE(0x29d2, info)	/* Q33_G */
+
+#define INTEL_I965GM_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2a02, info),	/* I965_GM */ \
+	INTEL_VGA_DEVICE(0x2a12, info)  /* I965_GME */
+
+#define INTEL_GM45_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
+
+#define INTEL_G45_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
+	INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
+	INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
+	INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
+	INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
+	INTEL_VGA_DEVICE(0x2e92, info)	/* B43_G.1 */
+
+#define INTEL_PINEVIEW_IDS(info)			\
+	INTEL_VGA_DEVICE(0xa001, info),			\
+	INTEL_VGA_DEVICE(0xa011, info)
+
+#define INTEL_IRONLAKE_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0042, info)
+
+#define INTEL_IRONLAKE_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0046, info)
+
+#define INTEL_SNB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0102, info), \
+	INTEL_VGA_DEVICE(0x0112, info), \
+	INTEL_VGA_DEVICE(0x0122, info), \
+	INTEL_VGA_DEVICE(0x010A, info)
+
+#define INTEL_SNB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0106, info), \
+	INTEL_VGA_DEVICE(0x0116, info), \
+	INTEL_VGA_DEVICE(0x0126, info)
+
+#define INTEL_IVB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0166, info)  /* GT2 mobile */
+
+#define INTEL_IVB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x016a, info)  /* GT2 server */
+
+#define INTEL_IVB_Q_IDS(info) \
+	INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+
+#define INTEL_HSW_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+	INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+	INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
+	INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+	INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+	INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+	INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+	INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+	INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
+	INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+	INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+	INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
+
+#define INTEL_HSW_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
+
+#define INTEL_VLV_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0f30, info), \
+	INTEL_VGA_DEVICE(0x0f31, info), \
+	INTEL_VGA_DEVICE(0x0f32, info), \
+	INTEL_VGA_DEVICE(0x0f33, info), \
+	INTEL_VGA_DEVICE(0x0157, info)
+
+#define INTEL_VLV_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0155, info)
+
+#endif /* _I915_PCIIDS_H */
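
A sketch of the intended use of the new header: a driver builds its PCI ID
table from the per-generation macros, stashing a pointer to its own device
info in driver_data (struct my_device_info and the *_info variables are
illustrative, not part of the header):

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <drm/i915_pciids.h>

	struct my_device_info { int gen; };	/* illustrative driver data */

	static const struct my_device_info snb_d_info = { .gen = 6 };
	static const struct my_device_info snb_m_info = { .gen = 6 };

	static const struct pci_device_id pciidlist[] = {
		INTEL_SNB_D_IDS(&snb_d_info),
		INTEL_SNB_M_IDS(&snb_m_info),
		{ 0, 0, 0 }
	};
	MODULE_DEVICE_TABLE(pci, pciidlist);
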
diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h
new file mode 100644
index 0000000..ad95c7f
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for Samsung S3C64xx clock controller.
+*/
+
+#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H
+#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H
+
+/*
+ * Let each exported clock get a unique index, which is used on DT-enabled
+ * platforms to look up the clock from a clock specifier. These indices are
+ * therefore considered an ABI and so must not be changed. This implies
+ * that new clocks should be added either in free spaces between clock groups
+ * or at the end.
+ */
+
+/* Core clocks. */
+#define CLK27M			1
+#define CLK48M			2
+#define FOUT_APLL		3
+#define FOUT_MPLL		4
+#define FOUT_EPLL		5
+#define ARMCLK			6
+#define HCLKX2			7
+#define HCLK			8
+#define PCLK			9
+
+/* HCLK bus clocks. */
+#define HCLK_3DSE		16
+#define HCLK_UHOST		17
+#define HCLK_SECUR		18
+#define HCLK_SDMA1		19
+#define HCLK_SDMA0		20
+#define HCLK_IROM		21
+#define HCLK_DDR1		22
+#define HCLK_MEM1		23
+#define HCLK_MEM0		24
+#define HCLK_USB		25
+#define HCLK_HSMMC2		26
+#define HCLK_HSMMC1		27
+#define HCLK_HSMMC0		28
+#define HCLK_MDP		29
+#define HCLK_DHOST		30
+#define HCLK_IHOST		31
+#define HCLK_DMA1		32
+#define HCLK_DMA0		33
+#define HCLK_JPEG		34
+#define HCLK_CAMIF		35
+#define HCLK_SCALER		36
+#define HCLK_2D			37
+#define HCLK_TV			38
+#define HCLK_POST0		39
+#define HCLK_ROT		40
+#define HCLK_LCD		41
+#define HCLK_TZIC		42
+#define HCLK_INTC		43
+#define HCLK_MFC		44
+#define HCLK_DDR0		45
+
+/* PCLK bus clocks. */
+#define PCLK_IIC1		48
+#define PCLK_IIS2		49
+#define PCLK_SKEY		50
+#define PCLK_CHIPID		51
+#define PCLK_SPI1		52
+#define PCLK_SPI0		53
+#define PCLK_HSIRX		54
+#define PCLK_HSITX		55
+#define PCLK_GPIO		56
+#define PCLK_IIC0		57
+#define PCLK_IIS1		58
+#define PCLK_IIS0		59
+#define PCLK_AC97		60
+#define PCLK_TZPC		61
+#define PCLK_TSADC		62
+#define PCLK_KEYPAD		63
+#define PCLK_IRDA		64
+#define PCLK_PCM1		65
+#define PCLK_PCM0		66
+#define PCLK_PWM		67
+#define PCLK_RTC		68
+#define PCLK_WDT		69
+#define PCLK_UART3		70
+#define PCLK_UART2		71
+#define PCLK_UART1		72
+#define PCLK_UART0		73
+#define PCLK_MFC		74
+
+/* Special clocks. */
+#define SCLK_UHOST		80
+#define SCLK_MMC2_48		81
+#define SCLK_MMC1_48		82
+#define SCLK_MMC0_48		83
+#define SCLK_MMC2		84
+#define SCLK_MMC1		85
+#define SCLK_MMC0		86
+#define SCLK_SPI1_48		87
+#define SCLK_SPI0_48		88
+#define SCLK_SPI1		89
+#define SCLK_SPI0		90
+#define SCLK_DAC27		91
+#define SCLK_TV27		92
+#define SCLK_SCALER27		93
+#define SCLK_SCALER		94
+#define SCLK_LCD27		95
+#define SCLK_LCD		96
+#define SCLK_FIMC		97
+#define SCLK_POST0_27		98
+#define SCLK_AUDIO2		99
+#define SCLK_POST0		100
+#define SCLK_AUDIO1		101
+#define SCLK_AUDIO0		102
+#define SCLK_SECUR		103
+#define SCLK_IRDA		104
+#define SCLK_UART		105
+#define SCLK_MFC		106
+#define SCLK_CAM		107
+#define SCLK_JPEG		108
+#define SCLK_ONENAND		109
+
+/* MEM0 bus clocks - S3C6410-specific. */
+#define MEM0_CFCON		112
+#define MEM0_ONENAND1		113
+#define MEM0_ONENAND0		114
+#define MEM0_NFCON		115
+#define MEM0_SROM		116
+
+/* Muxes. */
+#define MOUT_APLL		128
+#define MOUT_MPLL		129
+#define MOUT_EPLL		130
+#define MOUT_MFC		131
+#define MOUT_AUDIO0		132
+#define MOUT_AUDIO1		133
+#define MOUT_UART		134
+#define MOUT_SPI0		135
+#define MOUT_SPI1		136
+#define MOUT_MMC0		137
+#define MOUT_MMC1		138
+#define MOUT_MMC2		139
+#define MOUT_UHOST		140
+#define MOUT_IRDA		141
+#define MOUT_LCD		142
+#define MOUT_SCALER		143
+#define MOUT_DAC27		144
+#define MOUT_TV27		145
+#define MOUT_AUDIO2		146
+
+/* Dividers. */
+#define DOUT_MPLL		160
+#define DOUT_SECUR		161
+#define DOUT_CAM		162
+#define DOUT_JPEG		163
+#define DOUT_MFC		164
+#define DOUT_MMC0		165
+#define DOUT_MMC1		166
+#define DOUT_MMC2		167
+#define DOUT_LCD		168
+#define DOUT_SCALER		169
+#define DOUT_UHOST		170
+#define DOUT_SPI0		171
+#define DOUT_SPI1		172
+#define DOUT_AUDIO0		173
+#define DOUT_AUDIO1		174
+#define DOUT_UART		175
+#define DOUT_IRDA		176
+#define DOUT_FIMC		177
+#define DOUT_AUDIO2		178
+
+/* Total number of clocks. */
+#define NR_CLKS			(DOUT_AUDIO2 + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H */
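These IDs are what consumers reference from a clock specifier (for example
clocks = <&controller ARMCLK> in a device tree source) and what the provider
uses to index its table of registered clocks. A minimal provider-side sketch;
the table name, function, and the fixed-rate registration are illustrative
only, not taken from this patch:

	#include <dt-bindings/clock/samsung,s3c64xx-clock.h>

	static struct clk *s3c64xx_clk_table[NR_CLKS];

	static void __init s3c64xx_clk_sketch(void)
	{
		/* the binding constant doubles as the index into the table */
		s3c64xx_clk_table[CLK27M] =
			clk_register_fixed_rate(NULL, "clk27m", NULL,
						CLK_IS_ROOT, 27000000);
	}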
diff --git a/include/dt-bindings/input/input.h b/include/dt-bindings/input/input.h
new file mode 100644
index 0000000..042e7b3
--- /dev/null
+++ b/include/dt-bindings/input/input.h
@@ -0,0 +1,525 @@
+/*
+ * This header provides constants for most input bindings.
+ *
+ * Most input bindings include key codes and the matrix key code format.
+ * In most cases, key codes and the matrix key code format use
+ * the standard values/macros defined in this header.
+ */
+
+#ifndef _DT_BINDINGS_INPUT_INPUT_H
+#define _DT_BINDINGS_INPUT_INPUT_H
+
+#define KEY_RESERVED		0
+#define KEY_ESC			1
+#define KEY_1			2
+#define KEY_2			3
+#define KEY_3			4
+#define KEY_4			5
+#define KEY_5			6
+#define KEY_6			7
+#define KEY_7			8
+#define KEY_8			9
+#define KEY_9			10
+#define KEY_0			11
+#define KEY_MINUS		12
+#define KEY_EQUAL		13
+#define KEY_BACKSPACE		14
+#define KEY_TAB			15
+#define KEY_Q			16
+#define KEY_W			17
+#define KEY_E			18
+#define KEY_R			19
+#define KEY_T			20
+#define KEY_Y			21
+#define KEY_U			22
+#define KEY_I			23
+#define KEY_O			24
+#define KEY_P			25
+#define KEY_LEFTBRACE		26
+#define KEY_RIGHTBRACE		27
+#define KEY_ENTER		28
+#define KEY_LEFTCTRL		29
+#define KEY_A			30
+#define KEY_S			31
+#define KEY_D			32
+#define KEY_F			33
+#define KEY_G			34
+#define KEY_H			35
+#define KEY_J			36
+#define KEY_K			37
+#define KEY_L			38
+#define KEY_SEMICOLON		39
+#define KEY_APOSTROPHE		40
+#define KEY_GRAVE		41
+#define KEY_LEFTSHIFT		42
+#define KEY_BACKSLASH		43
+#define KEY_Z			44
+#define KEY_X			45
+#define KEY_C			46
+#define KEY_V			47
+#define KEY_B			48
+#define KEY_N			49
+#define KEY_M			50
+#define KEY_COMMA		51
+#define KEY_DOT			52
+#define KEY_SLASH		53
+#define KEY_RIGHTSHIFT		54
+#define KEY_KPASTERISK		55
+#define KEY_LEFTALT		56
+#define KEY_SPACE		57
+#define KEY_CAPSLOCK		58
+#define KEY_F1			59
+#define KEY_F2			60
+#define KEY_F3			61
+#define KEY_F4			62
+#define KEY_F5			63
+#define KEY_F6			64
+#define KEY_F7			65
+#define KEY_F8			66
+#define KEY_F9			67
+#define KEY_F10			68
+#define KEY_NUMLOCK		69
+#define KEY_SCROLLLOCK		70
+#define KEY_KP7			71
+#define KEY_KP8			72
+#define KEY_KP9			73
+#define KEY_KPMINUS		74
+#define KEY_KP4			75
+#define KEY_KP5			76
+#define KEY_KP6			77
+#define KEY_KPPLUS		78
+#define KEY_KP1			79
+#define KEY_KP2			80
+#define KEY_KP3			81
+#define KEY_KP0			82
+#define KEY_KPDOT		83
+
+#define KEY_ZENKAKUHANKAKU	85
+#define KEY_102ND		86
+#define KEY_F11			87
+#define KEY_F12			88
+#define KEY_RO			89
+#define KEY_KATAKANA		90
+#define KEY_HIRAGANA		91
+#define KEY_HENKAN		92
+#define KEY_KATAKANAHIRAGANA	93
+#define KEY_MUHENKAN		94
+#define KEY_KPJPCOMMA		95
+#define KEY_KPENTER		96
+#define KEY_RIGHTCTRL		97
+#define KEY_KPSLASH		98
+#define KEY_SYSRQ		99
+#define KEY_RIGHTALT		100
+#define KEY_LINEFEED		101
+#define KEY_HOME		102
+#define KEY_UP			103
+#define KEY_PAGEUP		104
+#define KEY_LEFT		105
+#define KEY_RIGHT		106
+#define KEY_END			107
+#define KEY_DOWN		108
+#define KEY_PAGEDOWN		109
+#define KEY_INSERT		110
+#define KEY_DELETE		111
+#define KEY_MACRO		112
+#define KEY_MUTE		113
+#define KEY_VOLUMEDOWN		114
+#define KEY_VOLUMEUP		115
+#define KEY_POWER		116	/* SC System Power Down */
+#define KEY_KPEQUAL		117
+#define KEY_KPPLUSMINUS		118
+#define KEY_PAUSE		119
+#define KEY_SCALE		120	/* AL Compiz Scale (Expose) */
+
+#define KEY_KPCOMMA		121
+#define KEY_HANGEUL		122
+#define KEY_HANGUEL		KEY_HANGEUL
+#define KEY_HANJA		123
+#define KEY_YEN			124
+#define KEY_LEFTMETA		125
+#define KEY_RIGHTMETA		126
+#define KEY_COMPOSE		127
+
+#define KEY_STOP		128	/* AC Stop */
+#define KEY_AGAIN		129
+#define KEY_PROPS		130	/* AC Properties */
+#define KEY_UNDO		131	/* AC Undo */
+#define KEY_FRONT		132
+#define KEY_COPY		133	/* AC Copy */
+#define KEY_OPEN		134	/* AC Open */
+#define KEY_PASTE		135	/* AC Paste */
+#define KEY_FIND		136	/* AC Search */
+#define KEY_CUT			137	/* AC Cut */
+#define KEY_HELP		138	/* AL Integrated Help Center */
+#define KEY_MENU		139	/* Menu (show menu) */
+#define KEY_CALC		140	/* AL Calculator */
+#define KEY_SETUP		141
+#define KEY_SLEEP		142	/* SC System Sleep */
+#define KEY_WAKEUP		143	/* System Wake Up */
+#define KEY_FILE		144	/* AL Local Machine Browser */
+#define KEY_SENDFILE		145
+#define KEY_DELETEFILE		146
+#define KEY_XFER		147
+#define KEY_PROG1		148
+#define KEY_PROG2		149
+#define KEY_WWW			150	/* AL Internet Browser */
+#define KEY_MSDOS		151
+#define KEY_COFFEE		152	/* AL Terminal Lock/Screensaver */
+#define KEY_SCREENLOCK		KEY_COFFEE
+#define KEY_DIRECTION		153
+#define KEY_CYCLEWINDOWS	154
+#define KEY_MAIL		155
+#define KEY_BOOKMARKS		156	/* AC Bookmarks */
+#define KEY_COMPUTER		157
+#define KEY_BACK		158	/* AC Back */
+#define KEY_FORWARD		159	/* AC Forward */
+#define KEY_CLOSECD		160
+#define KEY_EJECTCD		161
+#define KEY_EJECTCLOSECD	162
+#define KEY_NEXTSONG		163
+#define KEY_PLAYPAUSE		164
+#define KEY_PREVIOUSSONG	165
+#define KEY_STOPCD		166
+#define KEY_RECORD		167
+#define KEY_REWIND		168
+#define KEY_PHONE		169	/* Media Select Telephone */
+#define KEY_ISO			170
+#define KEY_CONFIG		171	/* AL Consumer Control Configuration */
+#define KEY_HOMEPAGE		172	/* AC Home */
+#define KEY_REFRESH		173	/* AC Refresh */
+#define KEY_EXIT		174	/* AC Exit */
+#define KEY_MOVE		175
+#define KEY_EDIT		176
+#define KEY_SCROLLUP		177
+#define KEY_SCROLLDOWN		178
+#define KEY_KPLEFTPAREN		179
+#define KEY_KPRIGHTPAREN	180
+#define KEY_NEW			181	/* AC New */
+#define KEY_REDO		182	/* AC Redo/Repeat */
+
+#define KEY_F13			183
+#define KEY_F14			184
+#define KEY_F15			185
+#define KEY_F16			186
+#define KEY_F17			187
+#define KEY_F18			188
+#define KEY_F19			189
+#define KEY_F20			190
+#define KEY_F21			191
+#define KEY_F22			192
+#define KEY_F23			193
+#define KEY_F24			194
+
+#define KEY_PLAYCD		200
+#define KEY_PAUSECD		201
+#define KEY_PROG3		202
+#define KEY_PROG4		203
+#define KEY_DASHBOARD		204	/* AL Dashboard */
+#define KEY_SUSPEND		205
+#define KEY_CLOSE		206	/* AC Close */
+#define KEY_PLAY		207
+#define KEY_FASTFORWARD		208
+#define KEY_BASSBOOST		209
+#define KEY_PRINT		210	/* AC Print */
+#define KEY_HP			211
+#define KEY_CAMERA		212
+#define KEY_SOUND		213
+#define KEY_QUESTION		214
+#define KEY_EMAIL		215
+#define KEY_CHAT		216
+#define KEY_SEARCH		217
+#define KEY_CONNECT		218
+#define KEY_FINANCE		219	/* AL Checkbook/Finance */
+#define KEY_SPORT		220
+#define KEY_SHOP		221
+#define KEY_ALTERASE		222
+#define KEY_CANCEL		223	/* AC Cancel */
+#define KEY_BRIGHTNESSDOWN	224
+#define KEY_BRIGHTNESSUP	225
+#define KEY_MEDIA		226
+
+#define KEY_SWITCHVIDEOMODE	227	/* Cycle between available video
+					   outputs (Monitor/LCD/TV-out/etc) */
+#define KEY_KBDILLUMTOGGLE	228
+#define KEY_KBDILLUMDOWN	229
+#define KEY_KBDILLUMUP		230
+
+#define KEY_SEND		231	/* AC Send */
+#define KEY_REPLY		232	/* AC Reply */
+#define KEY_FORWARDMAIL		233	/* AC Forward Msg */
+#define KEY_SAVE		234	/* AC Save */
+#define KEY_DOCUMENTS		235
+
+#define KEY_BATTERY		236
+
+#define KEY_BLUETOOTH		237
+#define KEY_WLAN		238
+#define KEY_UWB			239
+
+#define KEY_UNKNOWN		240
+
+#define KEY_VIDEO_NEXT		241	/* drive next video source */
+#define KEY_VIDEO_PREV		242	/* drive previous video source */
+#define KEY_BRIGHTNESS_CYCLE	243	/* brightness up, after max is min */
+#define KEY_BRIGHTNESS_ZERO	244	/* brightness off, use ambient */
+#define KEY_DISPLAY_OFF		245	/* display device to off state */
+
+#define KEY_WIMAX		246
+#define KEY_RFKILL		247	/* Key that controls all radios */
+
+#define KEY_MICMUTE		248	/* Mute / unmute the microphone */
+
+/* Code 255 is reserved for special needs of AT keyboard driver */
+
+#define BTN_MISC		0x100
+#define BTN_0			0x100
+#define BTN_1			0x101
+#define BTN_2			0x102
+#define BTN_3			0x103
+#define BTN_4			0x104
+#define BTN_5			0x105
+#define BTN_6			0x106
+#define BTN_7			0x107
+#define BTN_8			0x108
+#define BTN_9			0x109
+
+#define BTN_MOUSE		0x110
+#define BTN_LEFT		0x110
+#define BTN_RIGHT		0x111
+#define BTN_MIDDLE		0x112
+#define BTN_SIDE		0x113
+#define BTN_EXTRA		0x114
+#define BTN_FORWARD		0x115
+#define BTN_BACK		0x116
+#define BTN_TASK		0x117
+
+#define BTN_JOYSTICK		0x120
+#define BTN_TRIGGER		0x120
+#define BTN_THUMB		0x121
+#define BTN_THUMB2		0x122
+#define BTN_TOP			0x123
+#define BTN_TOP2		0x124
+#define BTN_PINKIE		0x125
+#define BTN_BASE		0x126
+#define BTN_BASE2		0x127
+#define BTN_BASE3		0x128
+#define BTN_BASE4		0x129
+#define BTN_BASE5		0x12a
+#define BTN_BASE6		0x12b
+#define BTN_DEAD		0x12f
+
+#define BTN_GAMEPAD		0x130
+#define BTN_SOUTH		0x130
+#define BTN_A			BTN_SOUTH
+#define BTN_EAST		0x131
+#define BTN_B			BTN_EAST
+#define BTN_C			0x132
+#define BTN_NORTH		0x133
+#define BTN_X			BTN_NORTH
+#define BTN_WEST		0x134
+#define BTN_Y			BTN_WEST
+#define BTN_Z			0x135
+#define BTN_TL			0x136
+#define BTN_TR			0x137
+#define BTN_TL2			0x138
+#define BTN_TR2			0x139
+#define BTN_SELECT		0x13a
+#define BTN_START		0x13b
+#define BTN_MODE		0x13c
+#define BTN_THUMBL		0x13d
+#define BTN_THUMBR		0x13e
+
+#define BTN_DIGI		0x140
+#define BTN_TOOL_PEN		0x140
+#define BTN_TOOL_RUBBER		0x141
+#define BTN_TOOL_BRUSH		0x142
+#define BTN_TOOL_PENCIL		0x143
+#define BTN_TOOL_AIRBRUSH	0x144
+#define BTN_TOOL_FINGER		0x145
+#define BTN_TOOL_MOUSE		0x146
+#define BTN_TOOL_LENS		0x147
+#define BTN_TOOL_QUINTTAP	0x148	/* Five fingers on trackpad */
+#define BTN_TOUCH		0x14a
+#define BTN_STYLUS		0x14b
+#define BTN_STYLUS2		0x14c
+#define BTN_TOOL_DOUBLETAP	0x14d
+#define BTN_TOOL_TRIPLETAP	0x14e
+#define BTN_TOOL_QUADTAP	0x14f	/* Four fingers on trackpad */
+
+#define BTN_WHEEL		0x150
+#define BTN_GEAR_DOWN		0x150
+#define BTN_GEAR_UP		0x151
+
+#define KEY_OK			0x160
+#define KEY_SELECT		0x161
+#define KEY_GOTO		0x162
+#define KEY_CLEAR		0x163
+#define KEY_POWER2		0x164
+#define KEY_OPTION		0x165
+#define KEY_INFO		0x166	/* AL OEM Features/Tips/Tutorial */
+#define KEY_TIME		0x167
+#define KEY_VENDOR		0x168
+#define KEY_ARCHIVE		0x169
+#define KEY_PROGRAM		0x16a	/* Media Select Program Guide */
+#define KEY_CHANNEL		0x16b
+#define KEY_FAVORITES		0x16c
+#define KEY_EPG			0x16d
+#define KEY_PVR			0x16e	/* Media Select Home */
+#define KEY_MHP			0x16f
+#define KEY_LANGUAGE		0x170
+#define KEY_TITLE		0x171
+#define KEY_SUBTITLE		0x172
+#define KEY_ANGLE		0x173
+#define KEY_ZOOM		0x174
+#define KEY_MODE		0x175
+#define KEY_KEYBOARD		0x176
+#define KEY_SCREEN		0x177
+#define KEY_PC			0x178	/* Media Select Computer */
+#define KEY_TV			0x179	/* Media Select TV */
+#define KEY_TV2			0x17a	/* Media Select Cable */
+#define KEY_VCR			0x17b	/* Media Select VCR */
+#define KEY_VCR2		0x17c	/* VCR Plus */
+#define KEY_SAT			0x17d	/* Media Select Satellite */
+#define KEY_SAT2		0x17e
+#define KEY_CD			0x17f	/* Media Select CD */
+#define KEY_TAPE		0x180	/* Media Select Tape */
+#define KEY_RADIO		0x181
+#define KEY_TUNER		0x182	/* Media Select Tuner */
+#define KEY_PLAYER		0x183
+#define KEY_TEXT		0x184
+#define KEY_DVD			0x185	/* Media Select DVD */
+#define KEY_AUX			0x186
+#define KEY_MP3			0x187
+#define KEY_AUDIO		0x188	/* AL Audio Browser */
+#define KEY_VIDEO		0x189	/* AL Movie Browser */
+#define KEY_DIRECTORY		0x18a
+#define KEY_LIST		0x18b
+#define KEY_MEMO		0x18c	/* Media Select Messages */
+#define KEY_CALENDAR		0x18d
+#define KEY_RED			0x18e
+#define KEY_GREEN		0x18f
+#define KEY_YELLOW		0x190
+#define KEY_BLUE		0x191
+#define KEY_CHANNELUP		0x192	/* Channel Increment */
+#define KEY_CHANNELDOWN		0x193	/* Channel Decrement */
+#define KEY_FIRST		0x194
+#define KEY_LAST		0x195	/* Recall Last */
+#define KEY_AB			0x196
+#define KEY_NEXT		0x197
+#define KEY_RESTART		0x198
+#define KEY_SLOW		0x199
+#define KEY_SHUFFLE		0x19a
+#define KEY_BREAK		0x19b
+#define KEY_PREVIOUS		0x19c
+#define KEY_DIGITS		0x19d
+#define KEY_TEEN		0x19e
+#define KEY_TWEN		0x19f
+#define KEY_VIDEOPHONE		0x1a0	/* Media Select Video Phone */
+#define KEY_GAMES		0x1a1	/* Media Select Games */
+#define KEY_ZOOMIN		0x1a2	/* AC Zoom In */
+#define KEY_ZOOMOUT		0x1a3	/* AC Zoom Out */
+#define KEY_ZOOMRESET		0x1a4	/* AC Zoom */
+#define KEY_WORDPROCESSOR	0x1a5	/* AL Word Processor */
+#define KEY_EDITOR		0x1a6	/* AL Text Editor */
+#define KEY_SPREADSHEET		0x1a7	/* AL Spreadsheet */
+#define KEY_GRAPHICSEDITOR	0x1a8	/* AL Graphics Editor */
+#define KEY_PRESENTATION	0x1a9	/* AL Presentation App */
+#define KEY_DATABASE		0x1aa	/* AL Database App */
+#define KEY_NEWS		0x1ab	/* AL Newsreader */
+#define KEY_VOICEMAIL		0x1ac	/* AL Voicemail */
+#define KEY_ADDRESSBOOK		0x1ad	/* AL Contacts/Address Book */
+#define KEY_MESSENGER		0x1ae	/* AL Instant Messaging */
+#define KEY_DISPLAYTOGGLE	0x1af	/* Turn display (LCD) on and off */
+#define KEY_SPELLCHECK		0x1b0   /* AL Spell Check */
+#define KEY_LOGOFF		0x1b1   /* AL Logoff */
+
+#define KEY_DOLLAR		0x1b2
+#define KEY_EURO		0x1b3
+
+#define KEY_FRAMEBACK		0x1b4	/* Consumer - transport controls */
+#define KEY_FRAMEFORWARD	0x1b5
+#define KEY_CONTEXT_MENU	0x1b6	/* GenDesc - system context menu */
+#define KEY_MEDIA_REPEAT	0x1b7	/* Consumer - transport control */
+#define KEY_10CHANNELSUP	0x1b8	/* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN	0x1b9	/* 10 channels down (10-) */
+#define KEY_IMAGES		0x1ba	/* AL Image Browser */
+
+#define KEY_DEL_EOL		0x1c0
+#define KEY_DEL_EOS		0x1c1
+#define KEY_INS_LINE		0x1c2
+#define KEY_DEL_LINE		0x1c3
+
+#define KEY_FN			0x1d0
+#define KEY_FN_ESC		0x1d1
+#define KEY_FN_F1		0x1d2
+#define KEY_FN_F2		0x1d3
+#define KEY_FN_F3		0x1d4
+#define KEY_FN_F4		0x1d5
+#define KEY_FN_F5		0x1d6
+#define KEY_FN_F6		0x1d7
+#define KEY_FN_F7		0x1d8
+#define KEY_FN_F8		0x1d9
+#define KEY_FN_F9		0x1da
+#define KEY_FN_F10		0x1db
+#define KEY_FN_F11		0x1dc
+#define KEY_FN_F12		0x1dd
+#define KEY_FN_1		0x1de
+#define KEY_FN_2		0x1df
+#define KEY_FN_D		0x1e0
+#define KEY_FN_E		0x1e1
+#define KEY_FN_F		0x1e2
+#define KEY_FN_S		0x1e3
+#define KEY_FN_B		0x1e4
+
+#define KEY_BRL_DOT1		0x1f1
+#define KEY_BRL_DOT2		0x1f2
+#define KEY_BRL_DOT3		0x1f3
+#define KEY_BRL_DOT4		0x1f4
+#define KEY_BRL_DOT5		0x1f5
+#define KEY_BRL_DOT6		0x1f6
+#define KEY_BRL_DOT7		0x1f7
+#define KEY_BRL_DOT8		0x1f8
+#define KEY_BRL_DOT9		0x1f9
+#define KEY_BRL_DOT10		0x1fa
+
+#define KEY_NUMERIC_0		0x200	/* used by phones, remote controls, */
+#define KEY_NUMERIC_1		0x201	/* and other keypads */
+#define KEY_NUMERIC_2		0x202
+#define KEY_NUMERIC_3		0x203
+#define KEY_NUMERIC_4		0x204
+#define KEY_NUMERIC_5		0x205
+#define KEY_NUMERIC_6		0x206
+#define KEY_NUMERIC_7		0x207
+#define KEY_NUMERIC_8		0x208
+#define KEY_NUMERIC_9		0x209
+#define KEY_NUMERIC_STAR	0x20a
+#define KEY_NUMERIC_POUND	0x20b
+
+#define KEY_CAMERA_FOCUS	0x210
+#define KEY_WPS_BUTTON		0x211	/* WiFi Protected Setup key */
+
+#define KEY_TOUCHPAD_TOGGLE	0x212	/* Request switch touchpad on or off */
+#define KEY_TOUCHPAD_ON		0x213
+#define KEY_TOUCHPAD_OFF	0x214
+
+#define KEY_CAMERA_ZOOMIN	0x215
+#define KEY_CAMERA_ZOOMOUT	0x216
+#define KEY_CAMERA_UP		0x217
+#define KEY_CAMERA_DOWN		0x218
+#define KEY_CAMERA_LEFT		0x219
+#define KEY_CAMERA_RIGHT	0x21a
+
+#define KEY_ATTENDANT_ON	0x21b
+#define KEY_ATTENDANT_OFF	0x21c
+#define KEY_ATTENDANT_TOGGLE	0x21d	/* Attendant call on or off */
+#define KEY_LIGHTS_TOGGLE	0x21e	/* Reading light on or off */
+
+#define BTN_DPAD_UP		0x220
+#define BTN_DPAD_DOWN		0x221
+#define BTN_DPAD_LEFT		0x222
+#define BTN_DPAD_RIGHT		0x223
+
+#define MATRIX_KEY(row, col, code)	\
+	((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF))
+
+#endif /* _DT_BINDINGS_INPUT_INPUT_H */
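MATRIX_KEY() packs a matrix position and key code into one 32-bit cell: row in
bits 31..24, column in bits 23..16, and the key code in bits 15..0. A small
sketch of packing and the matching unpack (the unpack expressions are for
illustration; they are not part of this header):

	static void matrix_key_sketch(void)
	{
		u32 cell = MATRIX_KEY(2, 3, KEY_ENTER);	/* row 2, col 3 -> KEY_ENTER */
		u8 row	 = (cell >> 24) & 0xff;		/* == 2 */
		u8 col	 = (cell >> 16) & 0xff;		/* == 3 */
		u16 code = cell & 0xffff;		/* == KEY_ENTER (28) */
	}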
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 3e7b62f..91b84a7 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -87,6 +87,7 @@
 #define PL080_CONTROL_SB_SIZE_MASK		(0x7 << 12)
 #define PL080_CONTROL_SB_SIZE_SHIFT		(12)
 #define PL080_CONTROL_TRANSFER_SIZE_MASK	(0xfff << 0)
+#define PL080S_CONTROL_TRANSFER_SIZE_MASK	(0x1ffffff << 0)
 #define PL080_CONTROL_TRANSFER_SIZE_SHIFT	(0)
 
 #define PL080_BSIZE_1				(0x0)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c388155..5f66d51 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -243,6 +243,8 @@
  * BDI_CAP_EXEC_MAP:       Can be mapped for execution
  *
  * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
+ *
+ * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
  */
 #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
 #define BDI_CAP_NO_WRITEBACK	0x00000002
@@ -254,6 +256,7 @@
 #define BDI_CAP_NO_ACCT_WB	0x00000080
 #define BDI_CAP_SWAP_BACKED	0x00000100
 #define BDI_CAP_STABLE_WRITES	0x00000200
+#define BDI_CAP_STRICTLIMIT	0x00000400
 
 #define BDI_CAP_VMFLAGS \
 	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 70cf138..e8112ae 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -31,7 +31,7 @@
 #ifdef __alpha__
 	unsigned int taso:1;
 #endif
-	unsigned int recursion_depth;
+	unsigned int recursion_depth; /* only for search_binary_handler() */
 	struct file * file;
 	struct cred *cred;	/* new credentials */
 	int unsafe;		/* how unsafe this exec is (mask of LSM_UNSAFE_*) */
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index dd7adff..8138c94 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -33,8 +33,11 @@
 	const char		**parent_names;
 	struct clk		**parents;
 	u8			num_parents;
+	u8			new_parent_index;
 	unsigned long		rate;
 	unsigned long		new_rate;
+	struct clk		*new_parent;
+	struct clk		*new_child;
 	unsigned long		flags;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1ec14a7..73bdb69 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -12,6 +12,7 @@
 #define __LINUX_CLK_PROVIDER_H
 
 #include <linux/clk.h>
+#include <linux/io.h>
 
 #ifdef CONFIG_COMMON_CLK
 
@@ -27,6 +28,7 @@
 #define CLK_IS_ROOT		BIT(4) /* root clk, has no parent */
 #define CLK_IS_BASIC		BIT(5) /* Basic clk, can't do a to_clk_foo() */
 #define CLK_GET_RATE_NOCACHE	BIT(6) /* do not use the cached clk rate */
+#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
 
 struct clk_hw;
 
@@ -79,6 +81,10 @@
  * @round_rate:	Given a target rate as input, returns the closest rate actually
  * 		supported by the clock.
  *
+ * @determine_rate: Given a target rate as input, returns the closest rate
+ *		actually supported by the clock, and optionally the parent clock
+ *		that should be used to provide the clock rate.
+ *
  * @get_parent:	Queries the hardware to determine the parent of a clock.  The
  * 		return value is a u8 which specifies the index corresponding to
  * 		the parent clock.  This index can be applied to either the
@@ -126,6 +132,9 @@
 					unsigned long parent_rate);
 	long		(*round_rate)(struct clk_hw *hw, unsigned long,
 					unsigned long *);
+	long		(*determine_rate)(struct clk_hw *hw, unsigned long rate,
+					unsigned long *best_parent_rate,
+					struct clk **best_parent_clk);
 	int		(*set_parent)(struct clk_hw *hw, u8 index);
 	u8		(*get_parent)(struct clk_hw *hw);
 	int		(*set_rate)(struct clk_hw *hw, unsigned long,
@@ -327,8 +336,10 @@
 #define CLK_MUX_INDEX_ONE		BIT(0)
 #define CLK_MUX_INDEX_BIT		BIT(1)
 #define CLK_MUX_HIWORD_MASK		BIT(2)
+#define CLK_MUX_READ_ONLY	BIT(3) /* mux setting cannot be changed */
 
 extern const struct clk_ops clk_mux_ops;
+extern const struct clk_ops clk_mux_ro_ops;
 
 struct clk *clk_register_mux(struct device *dev, const char *name,
 		const char **parent_names, u8 num_parents, unsigned long flags,
@@ -418,6 +429,7 @@
 struct clk_hw *__clk_get_hw(struct clk *clk);
 u8 __clk_get_num_parents(struct clk *clk);
 struct clk *__clk_get_parent(struct clk *clk);
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
 unsigned int __clk_get_enable_count(struct clk *clk);
 unsigned int __clk_get_prepare_count(struct clk *clk);
 unsigned long __clk_get_rate(struct clk *clk);
@@ -425,6 +437,9 @@
 bool __clk_is_prepared(struct clk *clk);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long *best_parent_rate,
+			      struct clk **best_parent_p);
 
 /*
  * FIXME clock api without lock protection
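A clock that can pick a better parent on rate changes implements the new
.determine_rate op; mux-like clocks can simply delegate to the
__clk_mux_determine_rate() helper declared above. A minimal sketch with
hypothetical foo_* names:

	/* Hypothetical sketch: delegate parent selection to the mux helper. */
	static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *best_parent_rate,
				       struct clk **best_parent_clk)
	{
		/* pick the parent whose rate gets closest to the request */
		return __clk_mux_determine_rate(hw, rate, best_parent_rate,
						best_parent_clk);
	}

	static const struct clk_ops foo_mux_ops = {
		.determine_rate	= foo_determine_rate,
		/* .get_parent, .set_parent, ... */
	};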
@@ -490,5 +505,21 @@
 #define of_clk_init(matches) \
 	{ while (0); }
 #endif /* CONFIG_OF */
+
+/*
+ * wrap access to peripherals in accessor routines
+ * for improved portability across platforms
+ */
+
+static inline u32 clk_readl(u32 __iomem *reg)
+{
+	return readl(reg);
+}
+
+static inline void clk_writel(u32 val, u32 __iomem *reg)
+{
+	writel(val, reg);
+}
+
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
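The clk_readl()/clk_writel() accessors give clock drivers a single indirection
point, so platforms with different register endianness or I/O semantics can be
handled in one place later. A sketch of the usual read-modify-write pattern
through them, using a hypothetical gate structure:

	/* Hypothetical sketch: enable bit set through the clk accessors. */
	struct foo_gate {
		struct clk_hw	hw;
		u32 __iomem	*reg;
		u8		bit_idx;
	};

	static int foo_gate_enable(struct clk_hw *hw)
	{
		struct foo_gate *gate = container_of(hw, struct foo_gate, hw);
		u32 val = clk_readl(gate->reg);

		clk_writel(val | BIT(gate->bit_idx), gate->reg);
		return 0;
	}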
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
new file mode 100644
index 0000000..98e892e
--- /dev/null
+++ b/include/linux/cmdline-parser.h
@@ -0,0 +1,43 @@
+/*
+ * Parse the command line to get the partition information.
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#ifndef CMDLINEPARSEH
+#define CMDLINEPARSEH
+
+#include <linux/blkdev.h>
+
+/* partition flags */
+#define PF_RDONLY                   0x01 /* Device is read only */
+#define PF_POWERUP_LOCK             0x02 /* Always locked after reset */
+
+struct cmdline_subpart {
+	char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+	sector_t from;
+	sector_t size;
+	int flags;
+	struct cmdline_subpart *next_subpart;
+};
+
+struct cmdline_parts {
+	char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+	unsigned int nr_subparts;
+	struct cmdline_subpart *subpart;
+	struct cmdline_parts *next_parts;
+};
+
+void cmdline_parts_free(struct cmdline_parts **parts);
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+					 const char *bdev);
+
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+		       int slot,
+		       int (*add_part)(int, struct cmdline_subpart *, void *),
+		       void *param);
+
+#endif /* CMDLINEPARSEH */
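The intended call sequence is: parse once, look up the block device by name,
then walk its subpartitions through a callback. A minimal sketch; the callback
name and its stop-on-non-zero return convention are assumptions, not spelled
out by this header:

	static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
	{
		/* register subpart->from/subpart->size as partition 'slot' */
		return 0;	/* assumed: non-zero stops the iteration */
	}

	static void parse_sketch(const char *cmdline, sector_t disk_size)
	{
		struct cmdline_parts *parts, *p;

		if (cmdline_parts_parse(&parts, cmdline))
			return;

		p = cmdline_parts_find(parts, "mmcblk0");
		if (p)
			cmdline_parts_set(p, disk_size, 1, add_part, NULL);

		cmdline_parts_free(&parts);
	}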
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ec1aee4..345da00 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -43,6 +43,7 @@
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)				\
 	asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
 	static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+	asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
 	asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
 	{								\
 		return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));	\
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 37e4f8d..fe68a5a 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -12,6 +12,15 @@
 extern unsigned long long elfcorehdr_addr;
 extern unsigned long long elfcorehdr_size;
 
+extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+				   unsigned long long *size);
+extern void __weak elfcorehdr_free(unsigned long long addr);
+extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+					 unsigned long from, unsigned long pfn,
+					 unsigned long size, pgprot_t prot);
+
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
 						unsigned long, int);
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e151d4c..653073d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -10,6 +10,7 @@
 
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/math64.h>
 #include <linux/ratelimit.h>
 
 struct dm_dev;
@@ -550,6 +551,14 @@
 #define DM_MAPIO_REMAPPED	1
 #define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
 
+#define dm_sector_div64(x, y)( \
+{ \
+	u64 _res; \
+	(x) = div64_u64_rem(x, y, &_res); \
+	_res; \
+} \
+)
+
 /*
  * Ceiling(n / sz)
  */
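dm_sector_div64() follows the sector_div() convention: the first argument is
updated in place with the quotient and the expression itself evaluates to the
remainder, via the div64_u64_rem() helper added to <linux/math64.h> in this
same series. A small sketch:

	static u64 split_sectors_sketch(void)
	{
		u64 sectors = 1000005;
		u64 rem;

		rem = dm_sector_div64(sectors, 1000);	/* sectors == 1000 */
		return rem;				/* == 5 */
	}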
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
new file mode 100644
index 0000000..2dc9b2b
--- /dev/null
+++ b/include/linux/dma/mmp-pdma.h
@@ -0,0 +1,15 @@
+#ifndef _MMP_PDMA_H_
+#define _MMP_PDMA_H_
+
+struct dma_chan;
+
+#ifdef CONFIG_MMP_PDMA
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+	return false;
+}
+#endif
+
+#endif /* _MMP_PDMA_H_ */
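mmp_pdma_filter_fn() is meant to be plugged into dma_request_channel() so a
client lands on a PDMA channel matching its request line, while the stub keeps
!CONFIG_MMP_PDMA builds linking. A usage sketch; the request-line value passed
as the filter parameter is illustrative:

	static struct dma_chan *mmp_pdma_chan_sketch(void)
	{
		unsigned int drcmr = 0;	/* assumed request line number */
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
	}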
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cb286b1..0bc7275 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -38,7 +38,10 @@
 #define DMA_MIN_COOKIE	1
 #define DMA_MAX_COOKIE	INT_MAX
 
-#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+	return cookie < 0 ? cookie : 0;
+}
 
 /**
  * enum dma_status - DMA transaction status
@@ -370,6 +373,25 @@
 	unsigned int slave_id;
 };
 
+/* struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * 	Since enum dma_transfer_direction is not defined as a bit mask,
+ * 	the dma controller should fill in (1 << <TYPE>) for each supported
+ * 	direction, and clients should test the mask the same way
+ * @cmd_pause: true if pause (and thereby resume) is supported
+ * @cmd_terminate: true if the terminate command is supported
+ */
+struct dma_slave_caps {
+	u32 src_addr_widths;
+	u32 dstn_addr_widths;
+	u32 directions;
+	bool cmd_pause;
+	bool cmd_terminate;
+};
+
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
 	return dev_name(&chan->dev->device);
@@ -532,6 +554,7 @@
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
+ * @device_slave_caps: return the slave channel capabilities
  */
 struct dma_device {
 
@@ -597,6 +620,7 @@
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
+	int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
 };
 
 static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -670,6 +694,21 @@
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+	if (!chan || !caps)
+		return -EINVAL;
+
+	/* check if the channel supports slave transactions */
+	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+		return -ENXIO;
+
+	if (chan->device->device_slave_caps)
+		return chan->device->device_slave_caps(chan, caps);
+
+	return -ENXIO;
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -958,8 +997,9 @@
 	}
 }
 
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
@@ -967,6 +1007,14 @@
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+	return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	return DMA_SUCCESS;
+}
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	return DMA_SUCCESS;
@@ -994,7 +1042,7 @@
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
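With device_slave_caps in place, clients can ask a channel what it supports
before programming a transfer instead of finding out via -EINVAL. A minimal
sketch checking one direction bit, built only on the declarations above:

	static bool chan_does_dev_to_mem(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;

		if (dma_get_slave_caps(chan, &caps) < 0)
			return false;	/* controller reports no capabilities */

		return caps.directions & (1 << DMA_DEV_TO_MEM);
	}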
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index cf5d2af61..ff0b981 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -9,7 +9,6 @@
 #define _LINUX_EVENTFD_H
 
 #include <linux/fcntl.h>
-#include <linux/file.h>
 #include <linux/wait.h>
 
 /*
@@ -26,6 +25,8 @@
 #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
 
+struct file;
+
 #ifdef CONFIG_EVENTFD
 
 struct file *eventfd_file_create(unsigned int count, int flags);
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
deleted file mode 100644
index 55d8702..0000000
--- a/include/linux/fsl/mxs-dma.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MXS_DMA_H__
-#define __MACH_MXS_DMA_H__
-
-#include <linux/dmaengine.h>
-
-struct mxs_dma_data {
-	int chan_irq;
-};
-
-extern int mxs_dma_is_apbh(struct dma_chan *chan);
-extern int mxs_dma_is_apbx(struct dma_chan *chan);
-#endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 661d374..f8d41cb 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -66,8 +66,8 @@
 	struct list_head next_chunk;	/* next chunk in pool */
 	atomic_t avail;
 	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
-	unsigned long start_addr;	/* starting address of memory chunk */
-	unsigned long end_addr;		/* ending address of memory chunk */
+	unsigned long start_addr;	/* start address of memory chunk */
+	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
 	unsigned long bits[0];		/* bitmap for allocating memory chunk */
 };
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c2b1801..0393270 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,6 +66,9 @@
 						vm_flags_t vm_flags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
+bool isolate_huge_page(struct page *page, struct list_head *list);
+void putback_active_hugepage(struct page *page);
+bool is_hugepage_active(struct page *page);
 void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -134,6 +137,9 @@
 	return 0;
 }
 
+#define isolate_huge_page(p, l) false
+#define putback_active_hugepage(p)	do {} while (0)
+#define is_hugepage_active(x)	false
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
@@ -261,6 +267,8 @@
 };
 
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -371,9 +379,23 @@
 	return __basepage_index(page);
 }
 
+extern void dissolve_free_huge_pages(unsigned long start_pfn,
+				     unsigned long end_pfn);
+int pmd_huge_support(void);
+/*
+ * Currently hugepage migration is enabled only for pmd-based hugepages.
+ * This function will be updated when hugepage migration is more widely
+ * supported.
+ */
+static inline int hugepage_migration_support(struct hstate *h)
+{
+	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+}
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL
@@ -396,6 +418,9 @@
 {
 	return page->index;
 }
+#define dissolve_free_huge_pages(s, e)	do {} while (0)
+#define pmd_huge_support()	0
+#define hugepage_migration_support(h)	0
 #endif	/* CONFIG_HUGETLB_PAGE */
 
 #endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index e73f2b7..f1c27a7 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -153,6 +153,7 @@
 void setup_arch(char **);
 void prepare_namespace(void);
 void __init load_default_modules(void);
+int __init init_rootfs(void);
 
 extern void (*late_time_init)(void);
 
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c4d870b..19c19a5 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -22,7 +22,7 @@
 	int in_use;
 	unsigned short seq;
 	unsigned short seq_max;
-	struct rw_semaphore rw_mutex;
+	struct rw_semaphore rwsem;
 	struct idr ipcs_idr;
 	int next_id;
 };
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 3e203eb..0e5d9ec 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -66,6 +66,7 @@
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
 		    u32 offset, struct device_node *);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+void gic_cpu_if_down(void);
 
 static inline void gic_init(unsigned int nr, int start,
 			    void __iomem *dist , void __iomem *cpu)
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
new file mode 100644
index 0000000..c78a892
--- /dev/null
+++ b/include/linux/irqchip/mmp.h
@@ -0,0 +1,6 @@
+#ifndef	__IRQCHIP_MMP_H
+#define	__IRQCHIP_MMP_H
+
+extern struct irq_chip icu_irq_chip;
+
+#endif	/* __IRQCHIP_MMP_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ca1d27a..925eaf2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -264,10 +264,36 @@
 extern void arch_disarm_kprobe(struct kprobe *p);
 extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
-extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
+struct kprobe_insn_cache {
+	struct mutex mutex;
+	void *(*alloc)(void);	/* allocate insn page */
+	void (*free)(void *);	/* free insn page */
+	struct list_head pages; /* list of kprobe_insn_page */
+	size_t insn_size;	/* size of instruction slot */
+	int nr_garbage;
+};
+
+extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
+extern void __free_insn_slot(struct kprobe_insn_cache *c,
+			     kprobe_opcode_t *slot, int dirty);
+
+#define DEFINE_INSN_CACHE_OPS(__name)					\
+extern struct kprobe_insn_cache kprobe_##__name##_slots;		\
+									\
+static inline kprobe_opcode_t *get_##__name##_slot(void)		\
+{									\
+	return __get_insn_slot(&kprobe_##__name##_slots);		\
+}									\
+									\
+static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
+{									\
+	__free_insn_slot(&kprobe_##__name##_slots, slot, dirty);	\
+}									\
+
+DEFINE_INSN_CACHE_OPS(insn);
+
 #ifdef CONFIG_OPTPROBES
 /*
  * Internal structure for direct jump optimized probe
@@ -287,13 +313,13 @@
 extern void arch_unoptimize_kprobes(struct list_head *oplist,
 				    struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
-extern kprobe_opcode_t *get_optinsn_slot(void);
-extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
 extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
 					unsigned long addr);
 
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
+DEFINE_INSN_CACHE_OPS(optinsn);
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_kprobes_optimization;
 extern int proc_kprobes_optimization_handler(struct ctl_table *table,
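For reference, DEFINE_INSN_CACHE_OPS(insn) expands to the wrappers below, so
the old get_insn_slot()/free_insn_slot() callers keep working on top of the
new cache object (the optinsn variant is identical modulo the name):

	/* Expansion of DEFINE_INSN_CACHE_OPS(insn), for illustration. */
	extern struct kprobe_insn_cache kprobe_insn_slots;

	static inline kprobe_opcode_t *get_insn_slot(void)
	{
		return __get_insn_slot(&kprobe_insn_slots);
	}

	static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
	{
		__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	}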
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index d21c13f..4356686 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -67,8 +67,8 @@
 *	note :  Destination buffer must already be allocated.
  *		slightly faster than lz4_decompress_unknownoutputsize()
  */
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
-		size_t actual_dest_len);
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+		unsigned char *dest, size_t actual_dest_len);
 
 /*
  * lz4_decompress_unknownoutputsize()
@@ -82,6 +82,6 @@
  *		  Error if return (< 0)
 *	note :  Destination buffer must already be allocated.
  */
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
-		char *dest, size_t *dest_len);
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+		unsigned char *dest, size_t *dest_len);
 #endif
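A decompression sketch for the const-corrected prototype; per the header
comments, the destination must already be allocated. The sketch treats src_len
as written back by the call, which is an assumption about the exact in/out
semantics rather than something this hunk states:

	static int lz4_sketch(const unsigned char *src, unsigned char *dst,
			      size_t decompressed_len)
	{
		size_t src_len;	/* assumed: reported back by lz4_decompress() */

		return lz4_decompress(src, &src_len, dst, decompressed_len);
	}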
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 2913b86..69ed5f5 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -31,6 +31,15 @@
 }
 
 /**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	*remainder = dividend % divisor;
+	return dividend / divisor;
+}
+
+/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
 static inline u64 div64_u64(u64 dividend, u64 divisor)
@@ -63,6 +72,10 @@
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
 #ifndef div64_u64
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
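The new div64_u64_rem() has an inline fast path, and builds without a native
64/64 divide pick up an out-of-line version via the #ifndef fallback above. A
small sketch splitting a byte count into whole mebibytes plus a remainder:

	static u64 mib_sketch(u64 bytes, u64 *rem)
	{
		/* e.g. bytes == (5ULL << 20) + 123 -> returns 5, *rem == 123 */
		return div64_u64_rem(bytes, 1 << 20, rem);
	}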
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f388203..31e95ac 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -60,6 +60,8 @@
 void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
+			    unsigned long  *end_pfn);
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 			  unsigned long *out_end_pfn, int *out_nid);
 
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0d7df39..da6716b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -91,7 +91,6 @@
 }
 
 #define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
 
 static inline void mpol_get(struct mempolicy *pol)
 {
@@ -126,6 +125,7 @@
 	spinlock_t lock;
 };
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
@@ -173,7 +173,7 @@
 /* Check if a vma is migratable */
 static inline int vma_migratable(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 		return 0;
 	/*
 	 * Migration allocates pages in the highest zone. If we cannot
@@ -240,7 +240,12 @@
 }
 
 #define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	return 0;
+}
 
 static inline void numa_policy_init(void)
 {
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index ce35113..b22883d 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -108,7 +108,6 @@
 	unsigned int			cd_gpio;
 	void (*set_pwr)(struct platform_device *host, int state);
 	void (*set_clk_div)(struct platform_device *host, int state);
-	int (*get_cd)(struct platform_device *host);
 	int (*write16_hook)(struct tmio_mmc_host *host, int addr);
 	/* clock management callbacks */
 	int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a405d3dc..6fe5214 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -41,8 +41,6 @@
 			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, enum migrate_mode mode, int reason);
-extern int migrate_huge_page(struct page *, new_page_t x,
-		unsigned long private, enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -62,9 +60,6 @@
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, enum migrate_mode mode, int reason)
 	{ return -ENOSYS; }
-static inline int migrate_huge_page(struct page *page, new_page_t x,
-		unsigned long private, enum migrate_mode mode)
-	{ return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d2d59b4..caf543c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -115,6 +115,12 @@
 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
+#else
+# define VM_SOFTDIRTY	0
+#endif
+
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
@@ -489,20 +495,6 @@
 	return (unsigned long)page[1].lru.prev;
 }
 
-static inline int compound_trans_order(struct page *page)
-{
-	int order;
-	unsigned long flags;
-
-	if (!PageHead(page))
-		return 0;
-
-	flags = compound_lock_irqsave(page);
-	order = compound_order(page);
-	compound_unlock_irqrestore(page, flags);
-	return order;
-}
-
 static inline void set_compound_order(struct page *page, unsigned long order)
 {
 	page[1].lru.prev = (void *)order;
@@ -637,12 +629,12 @@
 #endif
 
 /*
- * The identification function is only used by the buddy allocator for
- * determining if two pages could be buddies. We are not really
- * identifying a zone since we could be using a the section number
- * id if we have not node id available in page flags.
- * We guarantee only that it will return the same value for two
- * combinable pages in a zone.
+ * The identification function is mainly used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really identifying
+ * the zone since we could be using the section number id if we do not have
+ * node id available in page flags.
+ * We only guarantee that it will return the same value for two combinable
+ * pages in a zone.
  */
 static inline int page_zone_id(struct page *page)
 {
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1397ccf..cf55945 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,6 +2,7 @@
 #define LINUX_MM_INLINE_H
 
 #include <linux/huge_mm.h>
+#include <linux/swap.h>
 
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 443243b..da51bec 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -208,6 +208,8 @@
 	__mmc_claim_host(host, NULL);
 }
 
+struct device_node;
 extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
 
 #endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index e3c6a74..3e781b8 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -171,6 +171,7 @@
 	unsigned int            ocr_avail_sdio;	/* OCR bit masks */
 	unsigned int            ocr_avail_sd;
 	unsigned int            ocr_avail_mmc;
+	u32 ocr_mask;		/* available voltages */
 
 	wait_queue_head_t	buf_ready_int;	/* Waitqueue for Buffer Read Ready interrupt */
 	unsigned int		tuning_done;	/* Condition flag set when CMD19 succeeds */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index e7d5dd6..ccd8fb2 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -16,7 +16,6 @@
 
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <linux/sh_dma.h>
 
 /*
  * MMCIF : CE_CLK_CTRL [19:16]
@@ -33,12 +32,12 @@
  */
 
 struct sh_mmcif_plat_data {
-	void (*set_pwr)(struct platform_device *pdev, int state);
-	void (*down_pwr)(struct platform_device *pdev);
 	int (*get_cd)(struct platform_device *pdef);
 	unsigned int		slave_id_tx;	/* embedded slave_id_[tr]x */
 	unsigned int		slave_id_rx;
 	bool			use_cd_gpio : 1;
+	bool			ccs_unsupported : 1;
+	bool			clk_ctrl2_present : 1;
 	unsigned int		cd_gpio;
 	u8			sup_pclk;	/* 1 :SH7757, 0: SH7724/SH7372 */
 	unsigned long		caps;
@@ -62,6 +61,7 @@
 #define MMCIF_CE_INT_MASK	0x00000044
 #define MMCIF_CE_HOST_STS1	0x00000048
 #define MMCIF_CE_HOST_STS2	0x0000004C
+#define MMCIF_CE_CLK_CTRL2	0x00000070
 #define MMCIF_CE_VERSION	0x0000007C
 
 /* CE_BUF_ACC */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index b76bcf0..68927ae 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -25,8 +25,6 @@
 	unsigned long tmio_caps2;
 	u32 tmio_ocr_mask;	/* available MMC voltages */
 	unsigned int cd_gpio;
-	void (*set_pwr)(struct platform_device *pdev, int state);
-	int (*get_cd)(struct platform_device *pdev);
 
 	/* callbacks for board specific setup code */
 	int (*init)(struct platform_device *pdev,
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 7d88d27..b0c73e4 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -18,7 +18,8 @@
 void mmc_gpio_free_ro(struct mmc_host *host);
 
 int mmc_gpio_get_cd(struct mmc_host *host);
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio);
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+			unsigned int debounce);
 void mmc_gpio_free_cd(struct mmc_host *host);
 
 #endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index af4a3b7..bd791e4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -105,6 +105,7 @@
 enum zone_stat_item {
 	/* First 128 byte cacheline (assuming 64 bit words) */
 	NR_FREE_PAGES,
+	NR_ALLOC_BATCH,
 	NR_LRU_BASE,
 	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
 	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
@@ -352,7 +353,6 @@
 	 * free areas of different sizes
 	 */
 	spinlock_t		lock;
-	int                     all_unreclaimable; /* All pages pinned */
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 	/* Set to true when the PG_migrate_skip bits should be cleared */
 	bool			compact_blockskip_flush;
diff --git a/include/linux/namei.h b/include/linux/namei.h
index cd09751..8e47bc7 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -58,7 +58,6 @@
 
 extern int user_path_at(int, const char __user *, unsigned, struct path *);
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-extern int user_path_umountat(int, const char __user *, unsigned int, struct path *);
 
 #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
 #define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
@@ -71,8 +70,7 @@
 extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
 extern void done_path_create(struct path *, struct dentry *);
 extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
-			   const char *, unsigned int, struct path *);
+extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 041b42a..3de49ac 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -950,14 +950,14 @@
  *	multiple net devices on single physical port.
  *
  * void (*ndo_add_vxlan_port)(struct  net_device *dev,
- *			      sa_family_t sa_family, __u16 port);
+ *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
  *	a new port starts listening. The operation is protected by the
  *	vxlan_net->sock_lock.
  *
  * void (*ndo_del_vxlan_port)(struct  net_device *dev,
- *			      sa_family_t sa_family, __u16 port);
+ *			      sa_family_t sa_family, __be16 port);
  *	Called by vxlan to notify the driver about a UDP port and socket
  *	address family that vxlan is not listening to anymore. The operation
  *	is protected by the vxlan_net->sock_lock.
@@ -1093,10 +1093,10 @@
 							struct netdev_phys_port_id *ppid);
 	void			(*ndo_add_vxlan_port)(struct  net_device *dev,
 						      sa_family_t sa_family,
-						      __u16 port);
+						      __be16 port);
 	void			(*ndo_del_vxlan_port)(struct  net_device *dev,
 						      sa_family_t sa_family,
-						      __u16 port);
+						      __be16 port);
 };
 
 /*
diff --git a/include/linux/of.h b/include/linux/of.h
index 3a45c4f..f95aee3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -281,6 +281,9 @@
 extern int of_parse_phandle_with_args(const struct device_node *np,
 	const char *list_name, const char *cells_name, int index,
 	struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
+	const char *list_name, int cells_count, int index,
+	struct of_phandle_args *out_args);
 extern int of_count_phandle_with_args(const struct device_node *np,
 	const char *list_name, const char *cells_name);
 
@@ -324,12 +327,6 @@
  */
 const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
 			       u32 *pu);
-#define of_property_for_each_u32(np, propname, prop, p, u)	\
-	for (prop = of_find_property(np, propname, NULL),	\
-		p = of_prop_next_u32(prop, NULL, &u);		\
-		p;						\
-		p = of_prop_next_u32(prop, p, &u))
-
 /*
  * struct property *prop;
  * const char *s;
@@ -338,11 +335,6 @@
  *         printk("String value: %s\n", s);
  */
 const char *of_prop_next_string(struct property *prop, const char *cur);
-#define of_property_for_each_string(np, propname, prop, s)	\
-	for (prop = of_find_property(np, propname, NULL),	\
-		s = of_prop_next_string(prop, NULL);		\
-		s;						\
-		s = of_prop_next_string(prop, s))
 
 int of_device_is_stdout_path(struct device_node *dn);
 
@@ -497,6 +489,13 @@
 	return -ENOSYS;
 }
 
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+	const char *list_name, int cells_count, int index,
+	struct of_phandle_args *out_args)
+{
+	return -ENOSYS;
+}
+
 static inline int of_count_phandle_with_args(struct device_node *np,
 					     const char *list_name,
 					     const char *cells_name)
@@ -519,12 +518,20 @@
 	return 0;
 }
 
+static inline const __be32 *of_prop_next_u32(struct property *prop,
+		const __be32 *cur, u32 *pu)
+{
+	return NULL;
+}
+
+static inline const char *of_prop_next_string(struct property *prop,
+		const char *cur)
+{
+	return NULL;
+}
+
 #define of_match_ptr(_ptr)	NULL
 #define of_match_node(_matches, _node)	NULL
-#define of_property_for_each_u32(np, propname, prop, p, u) \
-	while (0)
-#define of_property_for_each_string(np, propname, prop, s) \
-	while (0)
 #endif /* CONFIG_OF */
 
 #ifndef of_node_to_nid
@@ -573,6 +580,18 @@
 	return of_property_read_u32_array(np, propname, out_value, 1);
 }
 
+#define of_property_for_each_u32(np, propname, prop, p, u)	\
+	for (prop = of_find_property(np, propname, NULL),	\
+		p = of_prop_next_u32(prop, NULL, &u);		\
+		p;						\
+		p = of_prop_next_u32(prop, p, &u))
+
+#define of_property_for_each_string(np, propname, prop, s)	\
+	for (prop = of_find_property(np, propname, NULL),	\
+		s = of_prop_next_string(prop, NULL);		\
+		s;						\
+		s = of_prop_next_string(prop, s))
+
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
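Moving the iterator macros below both the CONFIG_OF and !CONFIG_OF
declarations lets them expand against whichever of_prop_next_u32() /
of_prop_next_string() is in scope, so callers no longer need their own #ifdef.
A usage sketch with an illustrative property name:

	static void dump_u32_prop(struct device_node *np)
	{
		struct property *prop;
		const __be32 *p;
		u32 val;

		of_property_for_each_u32(np, "example-table", prop, p, val)
			pr_info("entry: %u\n", val);
	}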
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 19f26f8..a478c62 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -109,8 +109,7 @@
  * physical addresses.
  */
 #ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end);
+extern void early_init_dt_setup_initrd_arch(u64 start, u64 end);
 #endif
 
 /* Early flat tree scan hooks */
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 61bf53b..34597c8 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -9,10 +9,10 @@
 
 #ifdef CONFIG_OF_NET
 #include <linux/of.h>
-extern const int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np);
 extern const void *of_get_mac_address(struct device_node *np);
 #else
-static inline const int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np)
 {
 	return -ENODEV;
 }
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h
new file mode 100644
index 0000000..648b8ea
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-hpbdma.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_RCAR_HPBDMA_H
+#define __DMA_RCAR_HPBDMA_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* Transmit sizes and respective register values */
+enum {
+	XMIT_SZ_8BIT	= 0,
+	XMIT_SZ_16BIT	= 1,
+	XMIT_SZ_32BIT	= 2,
+	XMIT_SZ_MAX
+};
+
+/* DMA control register (DCR) bits */
+#define HPB_DMAE_DCR_DTAMD		(1u << 26)
+#define HPB_DMAE_DCR_DTAC		(1u << 25)
+#define HPB_DMAE_DCR_DTAU		(1u << 24)
+#define HPB_DMAE_DCR_DTAU1		(1u << 23)
+#define HPB_DMAE_DCR_SWMD		(1u << 22)
+#define HPB_DMAE_DCR_BTMD		(1u << 21)
+#define HPB_DMAE_DCR_PKMD		(1u << 20)
+#define HPB_DMAE_DCR_CT			(1u << 18)
+#define HPB_DMAE_DCR_ACMD		(1u << 17)
+#define HPB_DMAE_DCR_DIP		(1u << 16)
+#define HPB_DMAE_DCR_SMDL		(1u << 13)
+#define HPB_DMAE_DCR_SPDAM		(1u << 12)
+#define HPB_DMAE_DCR_SDRMD_MASK		(3u << 10)
+#define HPB_DMAE_DCR_SDRMD_MOD		(0u << 10)
+#define HPB_DMAE_DCR_SDRMD_AUTO		(1u << 10)
+#define HPB_DMAE_DCR_SDRMD_TIMER	(2u << 10)
+#define HPB_DMAE_DCR_SPDS_MASK		(3u << 8)
+#define HPB_DMAE_DCR_SPDS_8BIT		(0u << 8)
+#define HPB_DMAE_DCR_SPDS_16BIT		(1u << 8)
+#define HPB_DMAE_DCR_SPDS_32BIT		(2u << 8)
+#define HPB_DMAE_DCR_DMDL		(1u << 5)
+#define HPB_DMAE_DCR_DPDAM		(1u << 4)
+#define HPB_DMAE_DCR_DDRMD_MASK		(3u << 2)
+#define HPB_DMAE_DCR_DDRMD_MOD		(0u << 2)
+#define HPB_DMAE_DCR_DDRMD_AUTO		(1u << 2)
+#define HPB_DMAE_DCR_DDRMD_TIMER	(2u << 2)
+#define HPB_DMAE_DCR_DPDS_MASK		(3u << 0)
+#define HPB_DMAE_DCR_DPDS_8BIT		(0u << 0)
+#define HPB_DMAE_DCR_DPDS_16BIT		(1u << 0)
+#define HPB_DMAE_DCR_DPDS_32BIT		(2u << 0)
+
+/* Asynchronous reset register (ASYNCRSTR) bits */
+#define HPB_DMAE_ASYNCRSTR_ASRST41	BIT(10)
+#define HPB_DMAE_ASYNCRSTR_ASRST40	BIT(9)
+#define HPB_DMAE_ASYNCRSTR_ASRST39	BIT(8)
+#define HPB_DMAE_ASYNCRSTR_ASRST27	BIT(7)
+#define HPB_DMAE_ASYNCRSTR_ASRST26	BIT(6)
+#define HPB_DMAE_ASYNCRSTR_ASRST25	BIT(5)
+#define HPB_DMAE_ASYNCRSTR_ASRST24	BIT(4)
+#define HPB_DMAE_ASYNCRSTR_ASRST23	BIT(3)
+#define HPB_DMAE_ASYNCRSTR_ASRST22	BIT(2)
+#define HPB_DMAE_ASYNCRSTR_ASRST21	BIT(1)
+#define HPB_DMAE_ASYNCRSTR_ASRST20	BIT(0)
+
+struct hpb_dmae_slave_config {
+	unsigned int	id;
+	dma_addr_t	addr;
+	u32		dcr;
+	u32		port;
+	u32		rstr;
+	u32		mdr;
+	u32		mdm;
+	u32		flags;
+#define	HPB_DMAE_SET_ASYNC_RESET	BIT(0)
+#define	HPB_DMAE_SET_ASYNC_MODE		BIT(1)
+	u32		dma_ch;
+};
+
+#define HPB_DMAE_CHANNEL(_irq, _s_id)	\
+{					\
+	.ch_irq		= _irq,		\
+	.s_id		= _s_id,	\
+}
+
+struct hpb_dmae_channel {
+	unsigned int	ch_irq;
+	unsigned int	s_id;
+};
+
+struct hpb_dmae_pdata {
+	const struct hpb_dmae_slave_config *slaves;
+	int num_slaves;
+	const struct hpb_dmae_channel *channels;
+	int num_channels;
+	const unsigned int ts_shift[XMIT_SZ_MAX];
+	int num_hw_channels;
+};
+
+#endif
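
A sketch of how a board file might populate this new platform data; every ID, address, and IRQ below is hypothetical:

    static const struct hpb_dmae_slave_config demo_slaves[] = {
    	{
    		.id	= 0x10,			/* hypothetical slave ID */
    		.addr	= 0xffd60018,		/* hypothetical FIFO address */
    		.dcr	= HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT,
    		.flags	= HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
    		.dma_ch	= 0,
    	},
    };

    static const struct hpb_dmae_channel demo_channels[] = {
    	HPB_DMAE_CHANNEL(0x7c, 0x10),	/* hypothetical IRQ, slave ID */
    };

    static struct hpb_dmae_pdata demo_pdata = {
    	.slaves		= demo_slaves,
    	.num_slaves	= ARRAY_SIZE(demo_slaves),
    	.channels	= demo_channels,
    	.num_channels	= ARRAY_SIZE(demo_channels),
    	.ts_shift	= { 0, 1, 2 },	/* log2 bytes for 8/16/32-bit transfers */
    	.num_hw_channels = 1,
    };
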
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 57300fd..179fb91 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -180,4 +180,6 @@
 	const s16	(*xbar_chans)[2];
 };
 
+int edma_trigger_channel(unsigned);
+
 #endif
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
new file mode 100644
index 0000000..9f02837
--- /dev/null
+++ b/include/linux/power/bq24190_charger.h
@@ -0,0 +1,16 @@
+/*
+ * Platform data for the TI bq24190 battery charger driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BQ24190_CHARGER_H_
+#define _BQ24190_CHARGER_H_
+
+struct bq24190_platform_data {
+	unsigned int	gpio_int;	/* GPIO pin that's connected to INT# */
+};
+
+#endif
diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h
new file mode 100644
index 0000000..23110dc
--- /dev/null
+++ b/include/linux/power/twl4030_madc_battery.h
@@ -0,0 +1,39 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TWL4030_MADC_BATTERY_H
+#define __TWL4030_MADC_BATTERY_H
+
+/*
+ * Usually we can assume 100% @ 4.15V and 0% @ 3.3V but curves differ for
+ * charging and discharging!
+ */
+
+struct twl4030_madc_bat_calibration {
+	short voltage;	/* in mV - specify -1 for end of list */
+	short level;	/* in percent (0 .. 100%) */
+};
+
+struct twl4030_madc_bat_platform_data {
+	unsigned int capacity;	/* total capacity in uAh */
+	struct twl4030_madc_bat_calibration *charging;
+	int charging_size;
+	struct twl4030_madc_bat_calibration *discharging;
+	int discharging_size;
+};
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 804b906..5c26006 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,7 @@
 
 #include <linux/workqueue.h>
 #include <linux/leds.h>
+#include <linux/spinlock.h>
 
 struct device;
 
@@ -194,6 +195,8 @@
 	/* private */
 	struct device *dev;
 	struct work_struct changed_work;
+	spinlock_t changed_lock;
+	bool changed;
 #ifdef CONFIG_THERMAL
 	struct thermal_zone_device *tzd;
 	struct thermal_cooling_device *tcd;
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c..4039407 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -231,6 +231,7 @@
 unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
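
radix_tree_maybe_preload() pairs with the existing preload API; presumably it preloads only when the gfp mask allows sleeping and otherwise just disables preemption, so one call site can serve both blocking and atomic callers. A minimal sketch of the usual preload/insert pattern:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    static int demo_insert(struct radix_tree_root *root, unsigned long index,
    		       void *item, gfp_t gfp, spinlock_t *lock)
    {
    	int err = radix_tree_maybe_preload(gfp);

    	if (err)
    		return err;

    	spin_lock(lock);
    	err = radix_tree_insert(root, index, item);
    	spin_unlock(lock);

    	radix_tree_preload_end();	/* re-enables preemption */
    	return err;
    }
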
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 0f42469..73069cb 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -101,6 +101,7 @@
 extern const struct raid6_calls raid6_avx2x1;
 extern const struct raid6_calls raid6_avx2x2;
 extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_tilegx8;
 
 struct raid6_recov_calls {
 	void (*data2)(int, size_t, int, int, void **);
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 69e37c2..753207c 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -25,7 +25,7 @@
 
 extern const struct file_operations ramfs_file_operations;
 extern const struct vm_operations_struct generic_file_vm_ops;
-extern int __init init_rootfs(void);
+extern int __init init_ramfs_fs(void);
 
 int ramfs_fill_super(struct super_block *sb, void *data, int silent);
 
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 0022c1b..aa870a4 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -68,6 +68,10 @@
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);
 
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 
 			    struct rb_root *root);
@@ -81,4 +85,22 @@
 	*rb_link = node;
 }
 
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos:	the 'type *' to use as a loop cursor.
+ * @n:		another 'type *' to use as temporary storage
+ * @root:	'rb_root *' of the rbtree.
+ * @field:	the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+	for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+		n = rb_entry(rb_next_postorder(&pos->field), \
+			typeof(*pos), field); \
+	     &pos->field; \
+	     pos = n, \
+		n = rb_entry(rb_next_postorder(&pos->field), \
+			typeof(*pos), field))
+
 #endif	/* _LINUX_RBTREE_H */
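
Postorder visits children before their parent, and the iterator fetches the next entry before the loop body runs, so an entire tree can be torn down without any rb_erase()/rebalance work. A minimal sketch, assuming kmalloc()'ed entries:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct demo_node {
    	struct rb_node rb;
    	int key;
    };

    static void demo_free_tree(struct rb_root *root)
    {
    	struct demo_node *pos, *n;

    	rbtree_postorder_for_each_entry_safe(pos, n, root, rb)
    		kfree(pos);	/* safe: 'n' was computed before the free */

    	*root = RB_ROOT;
    }
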
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce1e1c0..45f254d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2169,15 +2169,15 @@
  * all we care about is that we have a task with the appropriate
  * pid, we don't actually care if we have the right task.
  */
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
 {
-	return p->pid == p->tgid;
+	return task_pid(p) == p->signal->leader_pid;
 }
 
 static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
 {
-	return p1->tgid == p2->tgid;
+	return p1->signal == p2->signal;
 }
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index 4e83f3e..b7b43b8 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -33,13 +33,44 @@
 	char		mid_rid;
 };
 
+/**
+ * struct sh_dmae_channel - DMAC channel platform data
+ * @offset:		register offset within the main IOMEM resource
+ * @dmars:		channel DMARS register offset
+ * @chclr_offset:	channel CHCLR register offset
+ * @dmars_bit:		channel DMARS field offset within the register
+ * @chclr_bit:		bit position, to be set to reset the channel
+ */
 struct sh_dmae_channel {
 	unsigned int	offset;
 	unsigned int	dmars;
-	unsigned int	dmars_bit;
 	unsigned int	chclr_offset;
+	unsigned char	dmars_bit;
+	unsigned char	chclr_bit;
 };
 
+/**
+ * struct sh_dmae_pdata - DMAC platform data
+ * @slave:		array of slaves
+ * @slave_num:		number of slaves in the above array
+ * @channel:		array of DMA channels
+ * @channel_num:	number of channels in the above array
+ * @ts_low_shift:	shift of the low part of the TS field
+ * @ts_low_mask:	low TS field mask
+ * @ts_high_shift:	additional shift of the high part of the TS field
+ * @ts_high_mask:	high TS field mask
+ * @ts_shift:		array of Transfer Size shifts, indexed by TS value
+ * @ts_shift_num:	number of shifts in the above array
+ * @dmaor_init:		DMAOR initialisation value
+ * @chcr_offset:	CHCR address offset
+ * @chcr_ie_bit:	CHCR Interrupt Enable bit
+ * @dmaor_is_32bit:	DMAOR is a 32-bit register
+ * @needs_tend_set:	the TEND register has to be set
+ * @no_dmars:		DMAC has no DMARS registers
+ * @chclr_present:	DMAC has one or several CHCLR registers
+ * @chclr_bitwise:	channel CHCLR registers are bitwise
+ * @slave_only:		DMAC cannot be used for MEMCPY
+ */
 struct sh_dmae_pdata {
 	const struct sh_dmae_slave_config *slave;
 	int slave_num;
@@ -59,42 +90,22 @@
 	unsigned int needs_tend_set:1;
 	unsigned int no_dmars:1;
 	unsigned int chclr_present:1;
+	unsigned int chclr_bitwise:1;
 	unsigned int slave_only:1;
 };
 
-/* DMA register */
-#define SAR	0x00
-#define DAR	0x04
-#define TCR	0x08
-#define CHCR	0x0C
-#define DMAOR	0x40
-
-#define TEND	0x18 /* USB-DMAC */
-
 /* DMAOR definitions */
 #define DMAOR_AE	0x00000004
 #define DMAOR_NMIF	0x00000002
 #define DMAOR_DME	0x00000001
 
 /* Definitions for the SuperH DMAC */
-#define REQ_L	0x00000000
-#define REQ_E	0x00080000
-#define RACK_H	0x00000000
-#define RACK_L	0x00040000
-#define ACK_R	0x00000000
-#define ACK_W	0x00020000
-#define ACK_H	0x00000000
-#define ACK_L	0x00010000
 #define DM_INC	0x00004000
 #define DM_DEC	0x00008000
 #define DM_FIX	0x0000c000
 #define SM_INC	0x00001000
 #define SM_DEC	0x00002000
 #define SM_FIX	0x00003000
-#define RS_IN	0x00000200
-#define RS_OUT	0x00000300
-#define TS_BLK	0x00000040
-#define TM_BUR	0x00000020
 #define CHCR_DE	0x00000001
 #define CHCR_TE	0x00000002
 #define CHCR_IE	0x00000004
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 5b1c984..f92c0a4 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -96,7 +96,7 @@
 	dma_addr_t (*slave_addr)(struct shdma_chan *);
 	int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
 			  dma_addr_t, dma_addr_t, size_t *);
-	int (*set_slave)(struct shdma_chan *, int, bool);
+	int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
 	void (*setup_xfer)(struct shdma_chan *, int);
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
@@ -116,7 +116,6 @@
 
 int shdma_request_irq(struct shdma_chan *, int,
 			   unsigned long, const char *);
-void shdma_free_irq(struct shdma_chan *);
 bool shdma_reset(struct shdma_dev *sdev);
 void shdma_chan_probe(struct shdma_dev *sdev,
 			   struct shdma_chan *schan, int id);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c181399..cfb7ca0 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -28,6 +28,27 @@
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 			     int wait);
 
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+		void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfp_flags);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -95,27 +116,6 @@
 #endif
 
 /*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
-
-/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -139,43 +139,6 @@
 }
 #define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
-#define on_each_cpu(func, info, wait)		\
-	({					\
-		unsigned long __flags;		\
-		local_irq_save(__flags);	\
-		func(info);			\
-		local_irq_restore(__flags);	\
-		0;				\
-	})
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
-	do {						\
-		if (cpumask_test_cpu(0, (mask))) {	\
-			local_irq_disable();		\
-			(func)(info);			\
-			local_irq_enable();		\
-		}					\
-	} while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
-	do {							\
-		void *__info = (info);				\
-		preempt_disable();				\
-		if ((cond_func)(0, __info)) {			\
-			local_irq_disable();			\
-			(func)(__info);				\
-			local_irq_enable();			\
-		}						\
-		preempt_enable();				\
-	} while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()			do {} while (0)
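
Hoisting these declarations out of the CONFIG_SMP block (and deleting the UP macro bodies below) means uniprocessor kernels now get real generic implementations with the same prototypes. A minimal caller sketch, valid on both UP and SMP:

    #include <linux/smp.h>
    #include <linux/atomic.h>

    static void demo_count(void *info)
    {
    	atomic_inc(info);	/* runs on each online CPU, IRQs disabled */
    }

    static int demo_count_cpus(void)
    {
    	atomic_t seen = ATOMIC_INIT(0);

    	on_each_cpu(demo_count, &seen, 1);	/* wait=1: returns once all CPUs ran */
    	return atomic_read(&seen);
    }
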
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index 32be8db..274bc0f 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -7,6 +7,11 @@
 struct device;
 struct mmc_host;
 
+#define MMC_SPI_USE_CD_GPIO			(1 << 0)
+#define MMC_SPI_USE_RO_GPIO			(1 << 1)
+#define MMC_SPI_CD_GPIO_ACTIVE_LOW		(1 << 2)
+#define MMC_SPI_RO_GPIO_ACTIVE_LOW		(1 << 3)
+
 /* Put this in platform_data of a device being used to manage an MMC/SD
  * card slot.  (Modeled after PXA mmc glue; see that for usage examples.)
  *
@@ -21,17 +26,19 @@
 		void *);
 	void (*exit)(struct device *, void *);
 
-	/* sense switch on sd cards */
-	int (*get_ro)(struct device *);
-
 	/*
-	 * If board does not use CD interrupts, driver can optimize polling
-	 * using this function.
+	 * Card Detect and Read Only GPIOs. To enable debouncing on the card
+	 * detect GPIO, set cd_debounce to the debounce time in
+	 * microseconds.
 	 */
-	int (*get_cd)(struct device *);
+	unsigned int flags;
+	unsigned int cd_gpio;
+	unsigned int cd_debounce;
+	unsigned int ro_gpio;
 
 	/* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
 	unsigned long caps;
+	unsigned long caps2;
 
 	/* how long to debounce card detect, in msecs */
 	u16 detect_delay;
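
The get_ro()/get_cd() callbacks give way to a declarative GPIO description. A sketch of the new-style platform data (the GPIO number is hypothetical; MMC_CAP_NEEDS_POLL comes from linux/mmc/host.h):

    static struct mmc_spi_platform_data demo_mmc_pdata = {
    	.flags		= MMC_SPI_USE_CD_GPIO | MMC_SPI_CD_GPIO_ACTIVE_LOW,
    	.cd_gpio	= 42,		/* hypothetical card-detect GPIO */
    	.cd_debounce	= 1000,		/* microseconds */
    	.caps		= MMC_CAP_NEEDS_POLL,
    };
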
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 6ce690d..437ddb6 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -264,12 +264,30 @@
 	return 0;
 }
 
+static inline int get_time(char **bpp, time_t *time)
+{
+	char buf[50];
+	long long ll;
+	int len = qword_get(bpp, buf, sizeof(buf));
+
+	if (len < 0)
+		return -EINVAL;
+	if (len == 0)
+		return -ENOENT;
+
+	if (kstrtoll(buf, 0, &ll))
+		return -EINVAL;
+
+	*time = (time_t)ll;
+	return 0;
+}
+
 static inline time_t get_expiry(char **bpp)
 {
-	int rv;
+	time_t rv;
 	struct timespec boot;
 
-	if (get_int(bpp, &rv))
+	if (get_time(bpp, &rv))
 		return 0;
 	if (rv < 0)
 		return 0;
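
get_time() parses the value as a long long via kstrtoll() before narrowing to time_t, so expiry strings no longer overflow the old get_int() path. A hypothetical cache_parse() fragment using it through get_expiry():

    static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
    {
    	time_t expiry = get_expiry(&mesg);	/* 'mesg' advances past the field */

    	if (!expiry)	/* 0 signals a parse error or an already-past time */
    		return -EINVAL;
    	/* ... fill in the cache entry, then sunrpc_cache_update() ... */
    	return 0;
    }
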
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 1f0216b..6eecfc2 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -243,7 +243,6 @@
 	struct xdr_buf		rq_res;
 	struct page *		rq_pages[RPCSVC_MAXPAGES];
 	struct page *		*rq_respages;	/* points into rq_pages */
-	int			rq_resused;	/* number of pages used for result */
 	struct page *		*rq_next_page; /* next reply page to use */
 
 	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d95cde5..c03c139 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -182,6 +182,33 @@
 #define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
 
 /*
+ * We use this to track usage of a cluster. A cluster is a block of swap disk
+ * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
+ * clusters are organized into a list; we fetch an entry from the list to get
+ * a free cluster.
+ *
+ * The data field stores the index of the next cluster if the cluster is free,
+ * or the cluster usage counter otherwise. The flags field determines if a
+ * cluster is free. This is protected by swap_info_struct.lock.
+ */
+struct swap_cluster_info {
+	unsigned int data:24;
+	unsigned int flags:8;
+};
+#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+
+/*
+ * We assign a cluster to each CPU, so each CPU can allocate swap entries from
+ * its own cluster and swap out sequentially. The purpose is to optimize swapout
+ * throughput.
+ */
+struct percpu_cluster {
+	struct swap_cluster_info index; /* Current cluster index */
+	unsigned int next; /* Likely next allocation offset */
+};
+
+/*
  * The in-memory structure used to track swap areas.
  */
 struct swap_info_struct {
@@ -191,14 +218,16 @@
 	signed char	next;		/* next type on the swap list */
 	unsigned int	max;		/* extent of the swap_map */
 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
+	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
+	struct swap_cluster_info free_cluster_head; /* free cluster list head */
+	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
 	unsigned int lowest_bit;	/* index of first free in swap_map */
 	unsigned int highest_bit;	/* index of last free in swap_map */
 	unsigned int pages;		/* total of usable pages of swap */
 	unsigned int inuse_pages;	/* number of those currently in use */
 	unsigned int cluster_next;	/* likely index for next allocation */
 	unsigned int cluster_nr;	/* countdown to next cluster search */
-	unsigned int lowest_alloc;	/* while preparing discard cluster */
-	unsigned int highest_alloc;	/* while preparing discard cluster */
+	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
 	struct swap_extent *curr_swap_extent;
 	struct swap_extent first_swap_extent;
 	struct block_device *bdev;	/* swap device or bdev of swap file */
@@ -212,14 +241,18 @@
 					 * protect map scan related fields like
 					 * swap_map, lowest_bit, highest_bit,
 					 * inuse_pages, cluster_next,
-					 * cluster_nr, lowest_alloc and
-					 * highest_alloc. other fields are only
-					 * changed at swapon/swapoff, so are
-					 * protected by swap_lock. changing
-					 * flags need hold this lock and
-					 * swap_lock. If both locks need hold,
-					 * hold swap_lock first.
+					 * cluster_nr, lowest_alloc,
+					 * highest_alloc, free/discard cluster
+					 * list. other fields are only changed
+					 * at swapon/swapoff, so are protected
+					 * by swap_lock. changing flags need
+					 * hold this lock and swap_lock. If
+					 * both locks need hold, hold swap_lock
+					 * first.
 					 */
+	struct work_struct discard_work; /* discard worker */
+	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
+	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
 };
 
 struct swap_list_t {
@@ -414,6 +447,7 @@
 
 #else /* CONFIG_SWAP */
 
+#define swap_address_space(entry)		(NULL)
 #define get_nr_swap_pages()			0L
 #define total_swap_pages			0L
 #define total_swapcache_pages()			0UL
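
The swap_cluster_info bitfield packs a 24-bit payload next to 8 bits of flags; hypothetical accessors (the real helpers live in mm/swapfile.c) make the encoding concrete:

    static inline bool demo_cluster_is_free(struct swap_cluster_info *info)
    {
    	return info->flags & CLUSTER_FLAG_FREE;
    }

    static inline unsigned int demo_cluster_next(struct swap_cluster_info *info)
    {
    	/* for a free cluster, data holds the index of the next free cluster */
    	return info->data;
    }
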
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 84662ec..7fac04e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -186,6 +186,7 @@
 #define __SYSCALL_DEFINEx(x, name, ...)					\
 	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));	\
 	asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
 	{								\
 		long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__));	\
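
The added line is a forward declaration for the SyS##name stub, presumably to silence "symbol not declared" warnings from sparse and -Wmissing-prototypes builds. Roughly, SYSCALL_DEFINE1(close, unsigned int, fd) now expands along these lines:

    asmlinkage long sys_close(unsigned int fd);
    static inline long SYSC_close(unsigned int fd);
    asmlinkage long SyS_close(long fd);	/* the new forward declaration */
    asmlinkage long SyS_close(long fd)
    {
    	long ret = SYSC_close((unsigned int)fd);
    	/* ... argument protection checks ... */
    	return ret;
    }
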
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a..80cf817 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,15 @@
  *     out of the arbitration process (and can be safe to take
  *     interrupts at any time.
  */
+#if defined(CONFIG_VGA_ARB)
 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
 				    unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+					   unsigned int decodes)
+{
+}
+#endif
 
 /**
  *     vga_get         - acquire & locks VGA resources
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index bd6cf61..1855f0a 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -70,6 +70,12 @@
 		THP_ZERO_PAGE_ALLOC,
 		THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
+#ifdef CONFIG_SMP
+		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
+		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+#endif
+		NR_TLB_LOCAL_FLUSH_ALL,
+		NR_TLB_LOCAL_FLUSH_ONE,
 		NR_VM_EVENT_ITEMS
 };
 
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c586679..e4b9480 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -143,7 +143,6 @@
 }
 
 extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
 #ifdef CONFIG_NUMA
 /*
@@ -198,7 +197,7 @@
 extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
-void refresh_cpu_vm_stats(int);
+void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
@@ -255,6 +254,7 @@
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
+static inline void cpu_vm_stats_fold(int cpu) { }
 
 static inline void drain_zonestat(struct zone *zone,
 			struct per_cpu_pageset *pset) { }
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4e198ca..021b8a3 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -98,8 +98,6 @@
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 				  enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
-				enum wb_reason reason);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
 
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 4c7c01a..c38a005 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -26,6 +26,8 @@
 #ifndef NET_9P_CLIENT_H
 #define NET_9P_CLIENT_H
 
+#include <linux/utsname.h>
+
 /* Number of requests per row */
 #define P9_ROW_MAXTAG 255
 
@@ -134,6 +136,7 @@
  * @tagpool - transaction id accounting for session
  * @reqs - 2D array of requests
  * @max_tag - current maximum tag id allocated
+ * @name - node name used as client id
  *
  * The client structure is used to keep track of various per-client
  * state that has been instantiated.
@@ -164,6 +167,8 @@
 	struct p9_idpool *tagpool;
 	struct p9_req_t *reqs[P9_ROW_MAXTAG];
 	int max_tag;
+
+	char name[__NEW_UTS_LEN + 1];
 };
 
 /**
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 3c4211f..ea0cc26 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -190,7 +190,9 @@
 }
 
 extern int			ndisc_init(void);
+extern int			ndisc_late_init(void);
 
+extern void			ndisc_late_cleanup(void);
 extern void			ndisc_cleanup(void);
 
 extern int			ndisc_rcv(struct sk_buff *skb);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 6bc943e..d0c6134 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -268,11 +268,13 @@
 
 	TP_PROTO(struct page *page,
 			int alloc_order, int fallback_order,
-			int alloc_migratetype, int fallback_migratetype),
+			int alloc_migratetype, int fallback_migratetype,
+			int change_ownership),
 
 	TP_ARGS(page,
 		alloc_order, fallback_order,
-		alloc_migratetype, fallback_migratetype),
+		alloc_migratetype, fallback_migratetype,
+		change_ownership),
 
 	TP_STRUCT__entry(
 		__field(	struct page *,	page			)
@@ -280,6 +282,7 @@
 		__field(	int,		fallback_order		)
 		__field(	int,		alloc_migratetype	)
 		__field(	int,		fallback_migratetype	)
+		__field(	int,		change_ownership	)
 	),
 
 	TP_fast_assign(
@@ -288,6 +291,7 @@
 		__entry->fallback_order		= fallback_order;
 		__entry->alloc_migratetype	= alloc_migratetype;
 		__entry->fallback_migratetype	= fallback_migratetype;
+		__entry->change_ownership	= change_ownership;
 	),
 
 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
@@ -299,7 +303,7 @@
 		__entry->alloc_migratetype,
 		__entry->fallback_migratetype,
 		__entry->fallback_order < pageblock_order,
-		__entry->alloc_migratetype == __entry->fallback_migratetype)
+		__entry->change_ownership)
 );
 
 #endif /* _TRACE_KMEM_H */
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index afd0cbd..f1e12bd 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	25
+#define DM_VERSION_MINOR	26
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2013-06-26)"
+#define DM_VERSION_EXTRA	"-ioctl (2013-08-15)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
diff --git a/init/Kconfig b/init/Kconfig
index bfa9e13..18bd9e3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1670,6 +1670,7 @@
 
 menuconfig MODULES
 	bool "Enable loadable module support"
+	option modules
 	help
 	  Kernel modules are small pieces of compiled code which can
 	  be inserted in the running kernel, rather than being
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 816014c..a51cddc 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -26,6 +26,8 @@
 #include <linux/async.h>
 #include <linux/fs_struct.h>
 #include <linux/slab.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
 
 #include <linux/nfs_fs.h>
 #include <linux/nfs_fs_sb.h>
@@ -588,3 +590,46 @@
 	sys_mount(".", "/", NULL, MS_MOVE, NULL);
 	sys_chroot(".");
 }
+
+static bool is_tmpfs;
+static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	static unsigned long once;
+	void *fill = ramfs_fill_super;
+
+	if (test_and_set_bit(0, &once))
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs)
+		fill = shmem_fill_super;
+
+	return mount_nodev(fs_type, flags, data, fill);
+}
+
+static struct file_system_type rootfs_fs_type = {
+	.name		= "rootfs",
+	.mount		= rootfs_mount,
+	.kill_sb	= kill_litter_super,
+};
+
+int __init init_rootfs(void)
+{
+	int err = register_filesystem(&rootfs_fs_type);
+
+	if (err)
+		return err;
+
+	if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
+		(!root_fs_names || strstr(root_fs_names, "tmpfs"))) {
+		err = shmem_init();
+		is_tmpfs = true;
+	} else {
+		err = init_ramfs_fs();
+	}
+
+	if (err)
+		unregister_filesystem(&rootfs_fs_type);
+
+	return err;
+}
diff --git a/ipc/msg.c b/ipc/msg.c
index b65fdf1..b0d541d4 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -70,8 +70,6 @@
 
 #define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
 
-#define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
-
 static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
 static int newque(struct ipc_namespace *, struct ipc_params *);
 #ifdef CONFIG_PROC_FS
@@ -172,7 +170,7 @@
  * @ns: namespace
  * @params: ptr to the structure that contains the key and msgflg
  *
- * Called with msg_ids.rw_mutex held (writer)
+ * Called with msg_ids.rwsem held (writer)
  */
 static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 {
@@ -259,8 +257,8 @@
  * removes the message queue from message queue ID IDR, and cleans up all the
  * messages associated with this queue.
  *
- * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
- * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
+ * msg_ids.rwsem (writer) and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.rwsem remains locked on exit.
  */
 static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
@@ -270,7 +268,8 @@
 	expunge_all(msq, -EIDRM);
 	ss_wakeup(&msq->q_senders, 1);
 	msg_rmid(ns, msq);
-	msg_unlock(msq);
+	ipc_unlock_object(&msq->q_perm);
+	rcu_read_unlock();
 
 	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
 		atomic_dec(&ns->msg_hdrs);
@@ -282,7 +281,7 @@
 }
 
 /*
- * Called with msg_ids.rw_mutex and ipcp locked.
+ * Called with msg_ids.rwsem and ipcp locked.
  */
 static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
 {
@@ -386,9 +385,9 @@
 }
 
 /*
- * This function handles some msgctl commands which require the rw_mutex
+ * This function handles some msgctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 		       struct msqid_ds __user *buf, int version)
@@ -403,7 +402,7 @@
 			return -EFAULT;
 	}
 
-	down_write(&msg_ids(ns).rw_mutex);
+	down_write(&msg_ids(ns).rwsem);
 	rcu_read_lock();
 
 	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
@@ -459,7 +458,7 @@
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&msg_ids(ns).rw_mutex);
+	up_write(&msg_ids(ns).rwsem);
 	return err;
 }
 
@@ -494,7 +493,7 @@
 		msginfo.msgmnb = ns->msg_ctlmnb;
 		msginfo.msgssz = MSGSSZ;
 		msginfo.msgseg = MSGSEG;
-		down_read(&msg_ids(ns).rw_mutex);
+		down_read(&msg_ids(ns).rwsem);
 		if (cmd == MSG_INFO) {
 			msginfo.msgpool = msg_ids(ns).in_use;
 			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
@@ -505,7 +504,7 @@
 			msginfo.msgtql = MSGTQL;
 		}
 		max_id = ipc_get_maxid(&msg_ids(ns));
-		up_read(&msg_ids(ns).rw_mutex);
+		up_read(&msg_ids(ns).rwsem);
 		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0 : max_id;
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 4be6581..59451c1 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -81,7 +81,7 @@
 	int next_id;
 	int total, in_use;
 
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 
 	in_use = ids->in_use;
 
@@ -89,11 +89,12 @@
 		perm = idr_find(&ids->ipcs_idr, next_id);
 		if (perm == NULL)
 			continue;
-		ipc_lock_by_ptr(perm);
+		rcu_read_lock();
+		ipc_lock_object(perm);
 		free(ns, perm);
 		total++;
 	}
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 }
 
 static void free_ipc_ns(struct ipc_namespace *ns)
diff --git a/ipc/sem.c b/ipc/sem.c
index 4108889..69b6a21 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -322,7 +322,7 @@
 }
 
 /*
- * sem_lock_(check_) routines are called in the paths where the rw_mutex
+ * sem_lock_(check_) routines are called in the paths where the rwsem
  * is not held.
  *
  * The caller holds the RCU read lock.
@@ -426,7 +426,7 @@
  * @ns: namespace
  * @params: ptr to the structure that contains key, semflg and nsems
  *
- * Called with sem_ids.rw_mutex held (as a writer)
+ * Called with sem_ids.rwsem held (as a writer)
  */
 
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
@@ -492,7 +492,7 @@
 
 
 /*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
  */
 static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 {
@@ -503,7 +503,7 @@
 }
 
 /*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
  */
 static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 				struct ipc_params *params)
@@ -994,8 +994,8 @@
 	return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
+ * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
  * remains locked on exit.
  */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
@@ -1116,7 +1116,7 @@
 		seminfo.semmnu = SEMMNU;
 		seminfo.semmap = SEMMAP;
 		seminfo.semume = SEMUME;
-		down_read(&sem_ids(ns).rw_mutex);
+		down_read(&sem_ids(ns).rwsem);
 		if (cmd == SEM_INFO) {
 			seminfo.semusz = sem_ids(ns).in_use;
 			seminfo.semaem = ns->used_sems;
@@ -1125,7 +1125,7 @@
 			seminfo.semaem = SEMAEM;
 		}
 		max_id = ipc_get_maxid(&sem_ids(ns));
-		up_read(&sem_ids(ns).rw_mutex);
+		up_read(&sem_ids(ns).rwsem);
 		if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) 
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -1431,9 +1431,9 @@
 }
 
 /*
- * This function handles some semctl commands which require the rw_mutex
+ * This function handles some semctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int semctl_down(struct ipc_namespace *ns, int semid,
 		       int cmd, int version, void __user *p)
@@ -1448,7 +1448,7 @@
 			return -EFAULT;
 	}
 
-	down_write(&sem_ids(ns).rw_mutex);
+	down_write(&sem_ids(ns).rwsem);
 	rcu_read_lock();
 
 	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
@@ -1487,7 +1487,7 @@
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&sem_ids(ns).rw_mutex);
+	up_write(&sem_ids(ns).rwsem);
 	return err;
 }
 
diff --git a/ipc/shm.c b/ipc/shm.c
index c6b4ad5..2821cdf 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -19,6 +19,9 @@
  * namespaces support
  * OpenVZ, SWsoft Inc.
  * Pavel Emelianov <xemul@openvz.org>
+ *
+ * Better ipc lock (kern_ipc_perm.lock) handling
+ * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  */
 
 #include <linux/slab.h>
@@ -80,8 +83,8 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
- * Only shm_ids.rw_mutex remains locked on exit.
+ * Called with shm_ids.rwsem (writer) and the shp structure locked.
+ * Only shm_ids.rwsem remains locked on exit.
  */
 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
@@ -124,8 +127,28 @@
 				IPC_SHM_IDS, sysvipc_shm_proc_show);
 }
 
+static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
+{
+	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
+
+	if (IS_ERR(ipcp))
+		return ERR_CAST(ipcp);
+
+	return container_of(ipcp, struct shmid_kernel, shm_perm);
+}
+
+static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
+{
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
+
+	if (IS_ERR(ipcp))
+		return ERR_CAST(ipcp);
+
+	return container_of(ipcp, struct shmid_kernel, shm_perm);
+}
+
 /*
- * shm_lock_(check_) routines are called in the paths where the rw_mutex
+ * shm_lock_(check_) routines are called in the paths where the rwsem
  * is not necessarily held.
  */
 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
@@ -144,17 +167,6 @@
 	ipc_lock_object(&ipcp->shm_perm);
 }
 
-static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
-						int id)
-{
-	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
-
-	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
-
-	return container_of(ipcp, struct shmid_kernel, shm_perm);
-}
-
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -182,7 +194,7 @@
  * @ns: namespace
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
+ * It has to be called with shp and shm_ids.rwsem (writer) locked,
  * but returns with shp unlocked and freed.
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -230,7 +242,7 @@
 	struct shmid_kernel *shp;
 	struct ipc_namespace *ns = sfd->ns;
 
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
 	BUG_ON(IS_ERR(shp));
@@ -241,10 +253,10 @@
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_current(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
@@ -275,7 +287,7 @@
 	return 0;
 }
 
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
@@ -286,7 +298,7 @@
 	 * We want to destroy segments without users and with already
 	 * exit'ed originating process.
 	 *
-	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
+	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 	 */
 	if (shp->shm_creator != NULL)
 		return 0;
@@ -300,10 +312,10 @@
 
 void shm_destroy_orphaned(struct ipc_namespace *ns)
 {
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	if (shm_ids(ns).in_use)
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
 
@@ -315,10 +327,10 @@
 		return;
 
 	/* Destroy all already created segments, but not mapped yet */
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	if (shm_ids(ns).in_use)
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -452,7 +464,7 @@
  * @ns: namespace
  * @params: ptr to the structure that contains key, size and shmflg
  *
- * Called with shm_ids.rw_mutex held as a writer.
+ * Called with shm_ids.rwsem held as a writer.
  */
 
 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
@@ -560,7 +572,7 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
  */
 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 {
@@ -571,7 +583,7 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
  */
 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 				struct ipc_params *params)
@@ -684,7 +696,7 @@
 
 /*
  * Calculate and add used RSS and swap pages of a shm.
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
  */
 static void shm_add_rss_swap(struct shmid_kernel *shp,
 	unsigned long *rss_add, unsigned long *swp_add)
@@ -711,7 +723,7 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
  */
 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 		unsigned long *swp)
@@ -740,9 +752,9 @@
 }
 
 /*
- * This function handles some shmctl commands which require the rw_mutex
+ * This function handles some shmctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 		       struct shmid_ds __user *buf, int version)
@@ -757,14 +769,13 @@
 			return -EFAULT;
 	}
 
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	rcu_read_lock();
 
-	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
-			       &shmid64.shm_perm, 0);
+	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
+				      &shmid64.shm_perm, 0);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
-		/* the ipc lock is not held upon failure */
 		goto out_unlock1;
 	}
 
@@ -772,14 +783,16 @@
 
 	err = security_shm_shmctl(shp, cmd);
 	if (err)
-		goto out_unlock0;
+		goto out_unlock1;
 
 	switch (cmd) {
 	case IPC_RMID:
+		ipc_lock_object(&shp->shm_perm);
 		/* do_shm_rmid unlocks the ipc object and rcu */
 		do_shm_rmid(ns, ipcp);
 		goto out_up;
 	case IPC_SET:
+		ipc_lock_object(&shp->shm_perm);
 		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
 		if (err)
 			goto out_unlock0;
@@ -787,6 +800,7 @@
 		break;
 	default:
 		err = -EINVAL;
+		goto out_unlock1;
 	}
 
 out_unlock0:
@@ -794,32 +808,27 @@
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 	return err;
 }
 
-SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
+			 int cmd, int version, void __user *buf)
 {
+	int err;
 	struct shmid_kernel *shp;
-	int err, version;
-	struct ipc_namespace *ns;
 
-	if (cmd < 0 || shmid < 0) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	version = ipc_parse_version(&cmd);
-	ns = current->nsproxy->ipc_ns;
-
-	switch (cmd) { /* replace with proc interface ? */
-	case IPC_INFO:
-	{
-		struct shminfo64 shminfo;
-
+	/* preliminary security checks for *_INFO */
+	if (cmd == IPC_INFO || cmd == SHM_INFO) {
 		err = security_shm_shmctl(NULL, cmd);
 		if (err)
 			return err;
+	}
+
+	switch (cmd) {
+	case IPC_INFO:
+	{
+		struct shminfo64 shminfo;
 
 		memset(&shminfo, 0, sizeof(shminfo));
 		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
@@ -830,9 +839,9 @@
 		if(copy_shminfo_to_user (buf, &shminfo, version))
 			return -EFAULT;
 
-		down_read(&shm_ids(ns).rw_mutex);
+		down_read(&shm_ids(ns).rwsem);
 		err = ipc_get_maxid(&shm_ids(ns));
-		up_read(&shm_ids(ns).rw_mutex);
+		up_read(&shm_ids(ns).rwsem);
 
 		if(err<0)
 			err = 0;
@@ -842,19 +851,15 @@
 	{
 		struct shm_info shm_info;
 
-		err = security_shm_shmctl(NULL, cmd);
-		if (err)
-			return err;
-
 		memset(&shm_info, 0, sizeof(shm_info));
-		down_read(&shm_ids(ns).rw_mutex);
+		down_read(&shm_ids(ns).rwsem);
 		shm_info.used_ids = shm_ids(ns).in_use;
 		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
 		shm_info.shm_tot = ns->shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
 		err = ipc_get_maxid(&shm_ids(ns));
-		up_read(&shm_ids(ns).rw_mutex);
+		up_read(&shm_ids(ns).rwsem);
 		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 			err = -EFAULT;
 			goto out;
@@ -869,27 +874,31 @@
 		struct shmid64_ds tbuf;
 		int result;
 
+		rcu_read_lock();
 		if (cmd == SHM_STAT) {
-			shp = shm_lock(ns, shmid);
+			shp = shm_obtain_object(ns, shmid);
 			if (IS_ERR(shp)) {
 				err = PTR_ERR(shp);
-				goto out;
+				goto out_unlock;
 			}
 			result = shp->shm_perm.id;
 		} else {
-			shp = shm_lock_check(ns, shmid);
+			shp = shm_obtain_object_check(ns, shmid);
 			if (IS_ERR(shp)) {
 				err = PTR_ERR(shp);
-				goto out;
+				goto out_unlock;
 			}
 			result = 0;
 		}
+
 		err = -EACCES;
 		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 			goto out_unlock;
+
 		err = security_shm_shmctl(shp, cmd);
 		if (err)
 			goto out_unlock;
+
 		memset(&tbuf, 0, sizeof(tbuf));
 		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 		tbuf.shm_segsz	= shp->shm_segsz;
@@ -899,43 +908,76 @@
 		tbuf.shm_cpid	= shp->shm_cprid;
 		tbuf.shm_lpid	= shp->shm_lprid;
 		tbuf.shm_nattch	= shp->shm_nattch;
-		shm_unlock(shp);
-		if(copy_shmid_to_user (buf, &tbuf, version))
+		rcu_read_unlock();
+
+		if (copy_shmid_to_user(buf, &tbuf, version))
 			err = -EFAULT;
 		else
 			err = result;
 		goto out;
 	}
+	default:
+		return -EINVAL;
+	}
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	return err;
+}
+
+SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+{
+	struct shmid_kernel *shp;
+	int err, version;
+	struct ipc_namespace *ns;
+
+	if (cmd < 0 || shmid < 0)
+		return -EINVAL;
+
+	version = ipc_parse_version(&cmd);
+	ns = current->nsproxy->ipc_ns;
+
+	switch (cmd) {
+	case IPC_INFO:
+	case SHM_INFO:
+	case SHM_STAT:
+	case IPC_STAT:
+		return shmctl_nolock(ns, shmid, cmd, version, buf);
+	case IPC_RMID:
+	case IPC_SET:
+		return shmctl_down(ns, shmid, cmd, buf, version);
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
 		struct file *shm_file;
 
-		shp = shm_lock_check(ns, shmid);
+		rcu_read_lock();
+		shp = shm_obtain_object_check(ns, shmid);
 		if (IS_ERR(shp)) {
 			err = PTR_ERR(shp);
-			goto out;
+			goto out_unlock1;
 		}
 
 		audit_ipc_obj(&(shp->shm_perm));
+		err = security_shm_shmctl(shp, cmd);
+		if (err)
+			goto out_unlock1;
 
+		ipc_lock_object(&shp->shm_perm);
 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			kuid_t euid = current_euid();
 			err = -EPERM;
 			if (!uid_eq(euid, shp->shm_perm.uid) &&
 			    !uid_eq(euid, shp->shm_perm.cuid))
-				goto out_unlock;
+				goto out_unlock0;
 			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
-				goto out_unlock;
+				goto out_unlock0;
 		}
 
-		err = security_shm_shmctl(shp, cmd);
-		if (err)
-			goto out_unlock;
-
 		shm_file = shp->shm_file;
 		if (is_file_hugepages(shm_file))
-			goto out_unlock;
+			goto out_unlock0;
 
 		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
@@ -944,32 +986,31 @@
 				shp->shm_perm.mode |= SHM_LOCKED;
 				shp->mlock_user = user;
 			}
-			goto out_unlock;
+			goto out_unlock0;
 		}
 
 		/* SHM_UNLOCK */
 		if (!(shp->shm_perm.mode & SHM_LOCKED))
-			goto out_unlock;
+			goto out_unlock0;
 		shmem_lock(shm_file, 0, shp->mlock_user);
 		shp->shm_perm.mode &= ~SHM_LOCKED;
 		shp->mlock_user = NULL;
 		get_file(shm_file);
-		shm_unlock(shp);
+		ipc_unlock_object(&shp->shm_perm);
+		rcu_read_unlock();
 		shmem_unlock_mapping(shm_file->f_mapping);
+
 		fput(shm_file);
-		goto out;
-	}
-	case IPC_RMID:
-	case IPC_SET:
-		err = shmctl_down(ns, shmid, cmd, buf, version);
 		return err;
+	}
 	default:
 		return -EINVAL;
 	}
 
-out_unlock:
-	shm_unlock(shp);
-out:
+out_unlock0:
+	ipc_unlock_object(&shp->shm_perm);
+out_unlock1:
+	rcu_read_unlock();
 	return err;
 }
 
@@ -1037,10 +1078,11 @@
 	 * additional creator id...
 	 */
 	ns = current->nsproxy->ipc_ns;
-	shp = shm_lock_check(ns, shmid);
+	rcu_read_lock();
+	shp = shm_obtain_object_check(ns, shmid);
 	if (IS_ERR(shp)) {
 		err = PTR_ERR(shp);
-		goto out;
+		goto out_unlock;
 	}
 
 	err = -EACCES;
@@ -1051,24 +1093,31 @@
 	if (err)
 		goto out_unlock;
 
+	ipc_lock_object(&shp->shm_perm);
 	path = shp->shm_file->f_path;
 	path_get(&path);
 	shp->shm_nattch++;
 	size = i_size_read(path.dentry->d_inode);
-	shm_unlock(shp);
+	ipc_unlock_object(&shp->shm_perm);
+	rcu_read_unlock();
 
 	err = -ENOMEM;
 	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
-	if (!sfd)
-		goto out_put_dentry;
+	if (!sfd) {
+		path_put(&path);
+		goto out_nattch;
+	}
 
 	file = alloc_file(&path, f_mode,
 			  is_file_hugepages(shp->shm_file) ?
 				&shm_file_operations_huge :
 				&shm_file_operations);
 	err = PTR_ERR(file);
-	if (IS_ERR(file))
-		goto out_free;
+	if (IS_ERR(file)) {
+		kfree(sfd);
+		path_put(&path);
+		goto out_nattch;
+	}
 
 	file->private_data = sfd;
 	file->f_mapping = shp->shm_file->f_mapping;
@@ -1094,7 +1143,7 @@
 		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
 			goto invalid;
 	}
-		
+
 	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
 	*raddr = addr;
 	err = 0;
@@ -1109,7 +1158,7 @@
 	fput(file);
 
 out_nattch:
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	shp = shm_lock(ns, shmid);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
@@ -1117,20 +1166,13 @@
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
-	up_write(&shm_ids(ns).rw_mutex);
-
-out:
+	up_write(&shm_ids(ns).rwsem);
 	return err;
 
 out_unlock:
-	shm_unlock(shp);
-	goto out;
-
-out_free:
-	kfree(sfd);
-out_put_dentry:
-	path_put(&path);
-	goto out_nattch;
+	rcu_read_unlock();
+out:
+	return err;
 }
 
 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
@@ -1235,8 +1277,7 @@
 #else /* CONFIG_MMU */
 	/* under NOMMU conditions, the exact address to be destroyed must be
 	 * given */
-	retval = -EINVAL;
-	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
+	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
 		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
 		retval = 0;
 	}
diff --git a/ipc/util.c b/ipc/util.c
index 4704223..e829da9 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -15,6 +15,14 @@
  * Jun 2006 - namespaces ssupport
  *            OpenVZ, SWsoft Inc.
  *            Pavel Emelianov <xemul@openvz.org>
+ *
+ * General sysv ipc locking scheme:
+ *  when doing ipc id lookups, take the ids->rwsem
+ *      rcu_read_lock()
+ *          obtain the ipc object (kern_ipc_perm)
+ *          perform security, capabilities, auditing and permission checks, etc.
+ *          acquire the ipc lock (kern_ipc_perm.lock) through ipc_lock_object()
+ *             perform data updates (i.e. SET, RMID, LOCK/UNLOCK commands)
  */
 
 #include <linux/mm.h>
@@ -119,7 +127,7 @@
  
 void ipc_init_ids(struct ipc_ids *ids)
 {
-	init_rwsem(&ids->rw_mutex);
+	init_rwsem(&ids->rwsem);
 
 	ids->in_use = 0;
 	ids->seq = 0;
@@ -174,7 +182,7 @@
  *	@ids: Identifier set
  *	@key: The key to find
  *	
- *	Requires ipc_ids.rw_mutex locked.
+ *	Requires ipc_ids.rwsem locked.
  *	Returns the LOCKED pointer to the ipc structure if found or NULL
  *	if not.
  *	If key is found ipc points to the owning ipc structure
@@ -197,7 +205,8 @@
 			continue;
 		}
 
-		ipc_lock_by_ptr(ipc);
+		rcu_read_lock();
+		ipc_lock_object(ipc);
 		return ipc;
 	}
 
@@ -208,7 +217,7 @@
  *	ipc_get_maxid 	-	get the last assigned id
  *	@ids: IPC identifier set
  *
- *	Called with ipc_ids.rw_mutex held.
+ *	Called with ipc_ids.rwsem held.
  */
 
 int ipc_get_maxid(struct ipc_ids *ids)
@@ -246,7 +255,7 @@
  *	is returned. The 'new' entry is returned in a locked state on success.
  *	On failure the entry is not locked and a negative err-code is returned.
  *
- *	Called with writer ipc_ids.rw_mutex held.
+ *	Called with writer ipc_ids.rwsem held.
  */
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
@@ -312,9 +321,9 @@
 {
 	int err;
 
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 	err = ops->getnew(ns, params);
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 	return err;
 }
 
@@ -331,7 +340,7 @@
  *
  *	On success, the IPC id is returned.
  *
- *	It is called with ipc_ids.rw_mutex and ipcp->lock held.
+ *	It is called with ipc_ids.rwsem and ipcp->lock held.
  */
 static int ipc_check_perms(struct ipc_namespace *ns,
 			   struct kern_ipc_perm *ipcp,
@@ -376,7 +385,7 @@
 	 * Take the lock as a writer since we are potentially going to add
 	 * a new entry + read locks are not "upgradable"
 	 */
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 	ipcp = ipc_findkey(ids, params->key);
 	if (ipcp == NULL) {
 		/* key not used */
@@ -402,7 +411,7 @@
 		}
 		ipc_unlock(ipcp);
 	}
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 
 	return err;
 }
@@ -413,7 +422,7 @@
  *	@ids: IPC identifier set
  *	@ipcp: ipc perm structure containing the identifier to remove
  *
- *	ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
+ *	ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
  *	before this function is called, and remain locked on the exit.
  */
  
@@ -621,7 +630,7 @@
 }
 
 /**
- * ipc_lock - Lock an ipc structure without rw_mutex held
+ * ipc_lock - Lock an ipc structure without rwsem held
  * @ids: IPC identifier set
  * @id: ipc id to look for
  *
@@ -677,22 +686,6 @@
 	return out;
 }
 
-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-
-	out = ipc_lock(ids, id);
-	if (IS_ERR(out))
-		return out;
-
-	if (ipc_checkid(out, id)) {
-		ipc_unlock(out);
-		return ERR_PTR(-EIDRM);
-	}
-
-	return out;
-}
-
 /**
  * ipcget - Common sys_*get() code
  * @ns : namsepace
@@ -733,7 +726,7 @@
 }
 
 /**
- * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
  * @ns:  the ipc namespace
  * @ids:  the table of ids where to look for the ipc
  * @id:   the id of the ipc to retrieve
@@ -746,29 +739,13 @@
  * It must be called without any lock held and
  *  - retrieves the ipc with the given id in the given table.
  *  - performs some audit and permission check, depending on the given cmd
- *  - returns the ipc with the ipc lock held in case of success
- *    or an err-code without any lock held otherwise.
+ *  - returns a pointer to the ipc object on success or the corresponding
+ *    error otherwise.
  *
- * Call holding the both the rw_mutex and the rcu read lock.
+ * Call holding both the rwsem and the rcu read lock.
  */
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
-				      struct ipc_ids *ids, int id, int cmd,
-				      struct ipc64_perm *perm, int extra_perm)
-{
-	struct kern_ipc_perm *ipcp;
-
-	ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
-	if (IS_ERR(ipcp))
-		goto out;
-
-	spin_lock(&ipcp->lock);
-out:
-	return ipcp;
-}
-
 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
-					     struct ipc_ids *ids, int id, int cmd,
-					     struct ipc64_perm *perm, int extra_perm)
+					struct ipc_ids *ids, int id, int cmd,
+					struct ipc64_perm *perm, int extra_perm)
 {
 	kuid_t euid;
 	int err = -EPERM;
@@ -846,7 +823,8 @@
 		ipc = idr_find(&ids->ipcs_idr, pos);
 		if (ipc != NULL) {
 			*new_pos = pos + 1;
-			ipc_lock_by_ptr(ipc);
+			rcu_read_lock();
+			ipc_lock_object(ipc);
 			return ipc;
 		}
 	}
@@ -884,7 +862,7 @@
 	 * Take the lock - this will be released by the corresponding
 	 * call to stop().
 	 */
-	down_read(&ids->rw_mutex);
+	down_read(&ids->rwsem);
 
 	/* pos < 0 is invalid */
 	if (*pos < 0)
@@ -911,7 +889,7 @@
 
 	ids = &iter->ns->ids[iface->ids];
 	/* Release the lock we took in start() */
-	up_read(&ids->rw_mutex);
+	up_read(&ids->rwsem);
 }
 
 static int sysvipc_proc_show(struct seq_file *s, void *it)
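
The new scheme from the header comment above, made concrete: a hypothetical update helper (imagined in ipc/shm.c, where shm_ids() is defined) that looks the object up under RCU, runs checks locklessly, and takes kern_ipc_perm.lock only around the actual write:

    static int demo_set_mode(struct ipc_namespace *ns, int id, umode_t mode)
    {
    	struct kern_ipc_perm *ipcp;

    	rcu_read_lock();
    	ipcp = ipc_obtain_object_check(&shm_ids(ns), id);	/* no spinlock yet */
    	if (IS_ERR(ipcp)) {
    		rcu_read_unlock();
    		return PTR_ERR(ipcp);
    	}

    	/* permission/security checks would run here, still lockless */

    	ipc_lock_object(ipcp);		/* kern_ipc_perm.lock */
    	ipcp->mode = mode;		/* the actual data update */
    	ipc_unlock_object(ipcp);
    	rcu_read_unlock();
    	return 0;
    }
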
diff --git a/ipc/util.h b/ipc/util.h
index b6a6a88..c5f3338b 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -94,10 +94,10 @@
 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
 #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
 
-/* must be called with ids->rw_mutex acquired for writing */
+/* must be called with ids->rwsem acquired for writing */
 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
 
-/* must be called with ids->rw_mutex acquired for reading */
+/* must be called with ids->rwsem acquired for reading */
 int ipc_get_maxid(struct ipc_ids *);
 
 /* must be called with both locks acquired. */
@@ -131,9 +131,6 @@
 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
 					     struct ipc_ids *ids, int id, int cmd,
 					     struct ipc64_perm *perm, int extra_perm);
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
-				      struct ipc_ids *ids, int id, int cmd,
-				      struct ipc64_perm *perm, int extra_perm);
 
 #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
   /* On IA-64, we always use the "64-bit version" of the IPC structures.  */ 
@@ -174,19 +171,12 @@
 	assert_spin_locked(&perm->lock);
 }
 
-static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
-{
-	rcu_read_lock();
-	ipc_lock_object(perm);
-}
-
 static inline void ipc_unlock(struct kern_ipc_perm *perm)
 {
 	ipc_unlock_object(perm);
 	rcu_read_unlock();
 }
 
-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 			struct ipc_ops *ops, struct ipc_params *params);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e0aeb32..2418b6e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -60,6 +60,7 @@
 #include <linux/poll.h>
 #include <linux/flex_array.h> /* used in cgroup_attach_task */
 #include <linux/kthread.h>
+#include <linux/file.h>
 
 #include <linux/atomic.h>
 
@@ -4034,8 +4035,8 @@
 	struct cgroup_event *event;
 	struct cgroup_subsys_state *cfile_css;
 	unsigned int efd, cfd;
-	struct file *efile;
-	struct file *cfile;
+	struct fd efile;
+	struct fd cfile;
 	char *endp;
 	int ret;
 
@@ -4058,31 +4059,31 @@
 	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
 	INIT_WORK(&event->remove, cgroup_event_remove);
 
-	efile = eventfd_fget(efd);
-	if (IS_ERR(efile)) {
-		ret = PTR_ERR(efile);
+	efile = fdget(efd);
+	if (!efile.file) {
+		ret = -EBADF;
 		goto out_kfree;
 	}
 
-	event->eventfd = eventfd_ctx_fileget(efile);
+	event->eventfd = eventfd_ctx_fileget(efile.file);
 	if (IS_ERR(event->eventfd)) {
 		ret = PTR_ERR(event->eventfd);
 		goto out_put_efile;
 	}
 
-	cfile = fget(cfd);
-	if (!cfile) {
+	cfile = fdget(cfd);
+	if (!cfile.file) {
 		ret = -EBADF;
 		goto out_put_eventfd;
 	}
 
 	/* the process need read permission on control file */
 	/* AV: shouldn't we check that it's been opened for read instead? */
-	ret = inode_permission(file_inode(cfile), MAY_READ);
+	ret = inode_permission(file_inode(cfile.file), MAY_READ);
 	if (ret < 0)
 		goto out_put_cfile;
 
-	event->cft = __file_cft(cfile);
+	event->cft = __file_cft(cfile.file);
 	if (IS_ERR(event->cft)) {
 		ret = PTR_ERR(event->cft);
 		goto out_put_cfile;
@@ -4103,7 +4104,7 @@
 
 	ret = -EINVAL;
 	event->css = cgroup_css(cgrp, event->cft->ss);
-	cfile_css = css_from_dir(cfile->f_dentry->d_parent, event->cft->ss);
+	cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss);
 	if (event->css && event->css == cfile_css && css_tryget(event->css))
 		ret = 0;
 
@@ -4121,25 +4122,25 @@
 	if (ret)
 		goto out_put_css;
 
-	efile->f_op->poll(efile, &event->pt);
+	efile.file->f_op->poll(efile.file, &event->pt);
 
 	spin_lock(&cgrp->event_list_lock);
 	list_add(&event->list, &cgrp->event_list);
 	spin_unlock(&cgrp->event_list_lock);
 
-	fput(cfile);
-	fput(efile);
+	fdput(cfile);
+	fdput(efile);
 
 	return 0;
 
 out_put_css:
 	css_put(event->css);
 out_put_cfile:
-	fput(cfile);
+	fdput(cfile);
 out_put_eventfd:
 	eventfd_ctx_put(event->eventfd);
 out_put_efile:
-	fput(efile);
+	fdput(efile);
 out_kfree:
 	kfree(event);
 
diff --git a/kernel/extable.c b/kernel/extable.c
index 67460b9..832cb28 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -41,7 +41,7 @@
 /* Sort the kernel's built-in exception table */
 void __init sort_main_extable(void)
 {
-	if (main_extable_sort_needed) {
+	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
 		pr_notice("Sorting __ex_table...\n");
 		sort_extable(__start___ex_table, __stop___ex_table);
 	}
diff --git a/kernel/fork.c b/kernel/fork.c
index c9eaf201..81ccb4f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -351,7 +351,6 @@
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
-	struct mempolicy *pol;
 
 	uprobe_start_dup_mmap();
 	down_write(&oldmm->mmap_sem);
@@ -400,11 +399,9 @@
 			goto fail_nomem;
 		*tmp = *mpnt;
 		INIT_LIST_HEAD(&tmp->anon_vma_chain);
-		pol = mpol_dup(vma_policy(mpnt));
-		retval = PTR_ERR(pol);
-		if (IS_ERR(pol))
+		retval = vma_dup_policy(mpnt, tmp);
+		if (retval)
 			goto fail_nomem_policy;
-		vma_set_policy(tmp, pol);
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
@@ -472,7 +469,7 @@
 	uprobe_end_dup_mmap();
 	return retval;
 fail_nomem_anon_vma_fork:
-	mpol_put(pol);
+	mpol_put(vma_policy(tmp));
 fail_nomem_policy:
 	kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
@@ -1173,13 +1170,16 @@
 		return ERR_PTR(-EINVAL);
 
 	/*
-	 * If the new process will be in a different pid namespace
-	 * don't allow the creation of threads.
+	 * If the new process will be in a different pid or user namespace
+	 * do not allow it to share a thread group or signal handlers or
+	 * parent with the forking task.
 	 */
-	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) !=
-	     current->nsproxy->pid_ns_for_children))
-		return ERR_PTR(-EINVAL);
+	if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
+		    (task_active_pid_ns(current) !=
+				current->nsproxy->pid_ns_for_children))
+			return ERR_PTR(-EINVAL);
+	}
 
 	retval = security_task_create(clone_flags);
 	if (retval)
@@ -1576,15 +1576,6 @@
 	long nr;
 
 	/*
-	 * Do some preliminary argument and permissions checking before we
-	 * actually start allocating stuff
-	 */
-	if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
-		if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
-			return -EINVAL;
-	}
-
-	/*
 	 * Determine whether and which event to report to ptracer.  When
 	 * called from kernel_thread or CLONE_UNTRACED is explicitly
 	 * requested, no event is reported; otherwise, report if the event
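
The relocated check is also stricter than the one it replaces: sharing signal
handlers or a parent across a pid or user namespace boundary is now refused
outright. For illustration, a minimal userspace sketch (illustrative only,
assuming a kernel with the check above) of a flag combination that now fails
with EINVAL:

	/* Hypothetical demo: CLONE_NEWUSER combined with CLONE_SIGHAND
	 * (which itself requires CLONE_VM) must now fail with EINVAL. */
	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <errno.h>

	static int child_fn(void *arg)
	{
		return 0;
	}

	int main(void)
	{
		static char stack[65536];
		int pid = clone(child_fn, stack + sizeof(stack),
				CLONE_NEWUSER | CLONE_SIGHAND | CLONE_VM | SIGCHLD,
				NULL);
		if (pid < 0)
			printf("clone: %s (EINVAL expected)\n", strerror(errno));
		return 0;
	}
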
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f7b55..2a74f30 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1474,11 +1474,8 @@
 	if (first_colon && (!first_space || first_colon < first_space))
 		return parse_crashkernel_mem(ck_cmdline, system_ram,
 				crash_size, crash_base);
-	else
-		return parse_crashkernel_simple(ck_cmdline, crash_size,
-				crash_base);
 
-	return 0;
+	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
 }
 
 /*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6e33498..a0d367a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -112,6 +112,7 @@
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	struct kprobe_insn_cache *cache;
 	int nused;
 	int ngarbage;
 	char slot_used[];
@@ -121,12 +122,6 @@
 	(offsetof(struct kprobe_insn_page, slot_used) +	\
 	 (sizeof(char) * (slots)))
 
-struct kprobe_insn_cache {
-	struct list_head pages;	/* list of kprobe_insn_page */
-	size_t insn_size;	/* size of instruction slot */
-	int nr_garbage;
-};
-
 static int slots_per_page(struct kprobe_insn_cache *c)
 {
 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +133,20 @@
 	SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
-static struct kprobe_insn_cache kprobe_insn_slots = {
+static void *alloc_insn_page(void)
+{
+	return module_alloc(PAGE_SIZE);
+}
+
+static void free_insn_page(void *page)
+{
+	module_free(NULL, page);
+}
+
+struct kprobe_insn_cache kprobe_insn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 	.insn_size = MAX_INSN_SIZE,
 	.nr_garbage = 0,
@@ -150,10 +157,12 @@
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
+	kprobe_opcode_t *slot = NULL;
 
+	mutex_lock(&c->mutex);
  retry:
 	list_for_each_entry(kip, &c->pages, list) {
 		if (kip->nused < slots_per_page(c)) {
@@ -162,7 +171,8 @@
 				if (kip->slot_used[i] == SLOT_CLEAN) {
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
-					return kip->insns + (i * c->insn_size);
+					slot = kip->insns + (i * c->insn_size);
+					goto out;
 				}
 			}
 			/* kip->nused is broken. Fix it. */
@@ -178,37 +188,29 @@
 	/* All out of space.  Need to allocate a new page. */
 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
-		return NULL;
+		goto out;
 
 	/*
 	 * Use module_alloc so this page is within +/- 2GB of where the
 	 * kernel image and loaded module images reside. This is required
 	 * so x86_64 can correctly handle the %rip-relative fixups.
 	 */
-	kip->insns = module_alloc(PAGE_SIZE);
+	kip->insns = c->alloc();
 	if (!kip->insns) {
 		kfree(kip);
-		return NULL;
+		goto out;
 	}
 	INIT_LIST_HEAD(&kip->list);
 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	kip->cache = c;
 	list_add(&kip->list, &c->pages);
-	return kip->insns;
-}
-
-
-kprobe_opcode_t __kprobes *get_insn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot(&kprobe_insn_slots);
-	mutex_unlock(&kprobe_insn_mutex);
-
-	return ret;
+	slot = kip->insns;
+out:
+	mutex_unlock(&c->mutex);
+	return slot;
 }
 
 /* Return 1 if all garbages are collected, otherwise 0. */
@@ -225,7 +227,7 @@
 		 */
 		if (!list_is_singular(&kip->list)) {
 			list_del(&kip->list);
-			module_free(NULL, kip->insns);
+			kip->cache->free(kip->insns);
 			kfree(kip);
 		}
 		return 1;
@@ -255,11 +257,12 @@
 	return 0;
 }
 
-static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
-				       kprobe_opcode_t *slot, int dirty)
+void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+				kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
+	mutex_lock(&c->mutex);
 	list_for_each_entry(kip, &c->pages, list) {
 		long idx = ((long)slot - (long)kip->insns) /
 				(c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +275,25 @@
 					collect_garbage_slots(c);
 			} else
 				collect_one_slot(kip, idx);
-			return;
+			goto out;
 		}
 	}
 	/* Could not free this slot. */
 	WARN_ON(1);
+out:
+	mutex_unlock(&c->mutex);
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_insn_mutex);
-	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
-	mutex_unlock(&kprobe_insn_mutex);
-}
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
-static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
-static struct kprobe_insn_cache kprobe_optinsn_slots = {
+struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 	/* .insn_size is initialized later */
 	.nr_garbage = 0,
 };
-/* Get a slot for optimized_kprobe buffer */
-kprobe_opcode_t __kprobes *get_optinsn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_optinsn_mutex);
-	ret = __get_insn_slot(&kprobe_optinsn_slots);
-	mutex_unlock(&kprobe_optinsn_mutex);
-
-	return ret;
-}
-
-void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_optinsn_mutex);
-	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
-	mutex_unlock(&kprobe_optinsn_mutex);
-}
 #endif
 #endif
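
With the mutex and the allocator callbacks folded into struct
kprobe_insn_cache, defining another slot cache is just a designated
initializer plus the generic helpers. A hedged sketch (my_insn_slots and its
callbacks are illustrative; the field layout is taken from the initializers
above):

	#include <linux/kprobes.h>
	#include <linux/moduleloader.h>

	static void *my_alloc_page(void)
	{
		return module_alloc(PAGE_SIZE);
	}

	static void my_free_page(void *page)
	{
		module_free(NULL, page);
	}

	static struct kprobe_insn_cache my_insn_slots = {
		.mutex = __MUTEX_INITIALIZER(my_insn_slots.mutex),
		.alloc = my_alloc_page,
		.free = my_free_page,
		.pages = LIST_HEAD_INIT(my_insn_slots.pages),
		.insn_size = MAX_INSN_SIZE,
		.nr_garbage = 0,
	};

	static void my_slot_roundtrip(void)
	{
		/* the per-cache mutex is taken inside the helpers */
		kprobe_opcode_t *slot = __get_insn_slot(&my_insn_slots);

		if (slot)
			__free_insn_slot(&my_insn_slots, slot, 0);
	}
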
 
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 2b6e699..7cbd450 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -18,14 +18,14 @@
 
 struct key *modsign_keyring;
 
-extern __initdata const u8 modsign_certificate_list[];
-extern __initdata const u8 modsign_certificate_list_end[];
+extern __initconst const u8 modsign_certificate_list[];
+extern __initconst const u8 modsign_certificate_list_end[];
 
 /*
  * We need to make sure ccache doesn't cache the .o file as it doesn't notice
  * if modsign.pub changes.
  */
-static __initdata const char annoy_ccache[] = __TIME__ "foo";
+static __initconst const char annoy_ccache[] = __TIME__ "foo";
 
 /*
  * Load the compiled-in keys
diff --git a/kernel/panic.c b/kernel/panic.c
index 8018646..b6c482c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -123,10 +123,14 @@
 	 */
 	smp_send_stop();
 
-	kmsg_dump(KMSG_DUMP_PANIC);
-
+	/*
+	 * Run any panic handlers, including those that might need to
+	 * add information to the kmsg dump output.
+	 */
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
+	kmsg_dump(KMSG_DUMP_PANIC);
+
 	bust_spinlocks(0);
 
 	if (!panic_blink)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 349587b..358a146 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -352,7 +352,7 @@
 		struct mem_extent *ext, *cur, *aux;
 
 		zone_start = zone->zone_start_pfn;
-		zone_end = zone->zone_start_pfn + zone->spanned_pages;
+		zone_end = zone_end_pfn(zone);
 
 		list_for_each_entry(ext, list, hook)
 			if (zone_start <= ext->end)
@@ -884,7 +884,7 @@
 			continue;
 
 		mark_free_pages(zone);
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (saveable_highmem_page(zone, pfn))
 				n++;
@@ -948,7 +948,7 @@
 			continue;
 
 		mark_free_pages(zone);
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (saveable_page(zone, pfn))
 				n++;
@@ -1041,7 +1041,7 @@
 		unsigned long max_zone_pfn;
 
 		mark_free_pages(zone);
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (page_is_saveable(zone, pfn))
 				memory_bm_set_bit(orig_bm, pfn);
@@ -1093,7 +1093,7 @@
 	unsigned long pfn, max_zone_pfn;
 
 	for_each_populated_zone(zone) {
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
@@ -1755,7 +1755,7 @@
 
 	/* Clear page flags */
 	for_each_populated_zone(zone) {
-		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn))
 				swsusp_unset_page_free(pfn_to_page(pfn));
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a146ee3..dd562e9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -236,7 +236,7 @@
 	 */
 	int dumpable = 0;
 	/* Don't let security modules deny introspection */
-	if (task == current)
+	if (same_thread_group(task, current))
 		return 0;
 	rcu_read_lock();
 	tcred = __task_cred(task);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 33eb462..b02a339 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,7 +122,7 @@
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 
-int debug_lockdep_rcu_enabled(void)
+int notrace debug_lockdep_rcu_enabled(void)
 {
 	return rcu_scheduler_active && debug_locks &&
 	       current->lockdep_recursion == 0;
diff --git a/kernel/signal.c b/kernel/signal.c
index 50e4107..ded28b9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3394,7 +3394,7 @@
 		new_ka.sa.sa_restorer = compat_ptr(restorer);
 #endif
 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
-		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
 		if (ret)
 			return -EFAULT;
 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
@@ -3406,7 +3406,7 @@
 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
 			       &oact->sa_handler);
 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
-		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
 #ifdef __ARCH_HAS_SA_RESTORER
 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
 				&oact->sa_restorer);
diff --git a/kernel/smp.c b/kernel/smp.c
index 449b707..0564571 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -48,10 +48,13 @@
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
 		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu)))
+				cpu_to_node(cpu))) {
+			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
+		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
+			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -572,8 +575,10 @@
  *
  * If @wait is true, then returns once @func has returned.
  *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 			void *info, bool wait)
@@ -582,9 +587,10 @@
 
 	smp_call_function_many(mask, func, info, wait);
 	if (cpumask_test_cpu(cpu, mask)) {
-		local_irq_disable();
+		unsigned long flags;
+		local_irq_save(flags);
 		func(info);
-		local_irq_enable();
+		local_irq_restore(flags);
 	}
 	put_cpu();
 }
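
The switch from local_irq_disable()/enable() to save/restore is what makes
the early-boot exception above safe: if the caller already ran with
interrupts off, they stay off on return instead of being re-enabled behind
its back. The pattern, as a minimal sketch:

	static void run_on_self(smp_call_func_t func, void *info)
	{
		unsigned long flags;

		local_irq_save(flags);		/* remember the current IRQ state */
		func(info);
		local_irq_restore(flags);	/* restore it exactly as found */
	}
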
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd806..4b082b5 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -34,6 +34,20 @@
 #else
 #define raw_read_can_lock(l)	read_can_lock(l)
 #define raw_write_can_lock(l)	write_can_lock(l)
+
+/*
+ * Some architectures can relax in favour of the CPU owning the lock.
+ */
+#ifndef arch_read_relax
+# define arch_read_relax(l)	cpu_relax()
+#endif
+#ifndef arch_write_relax
+# define arch_write_relax(l)	cpu_relax()
+#endif
+#ifndef arch_spin_relax
+# define arch_spin_relax(l)	cpu_relax()
+#endif
+
 /*
  * We build the __lock_function inlines here. They are too large for
  * inlining all over the place, but here is only one user per function
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 07f6fc4..dc69093 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1225,7 +1225,7 @@
 		.data		= &hugepages_treat_as_movable,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= hugetlb_treat_movable_handler,
+		.proc_handler	= proc_dointvec,
 	},
 	{
 		.procname	= "nr_overcommit_hugepages",
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 65bd3c9..8727032 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -4,6 +4,23 @@
 
 static struct callback_head work_exited; /* all we need is ->next == NULL */
 
+/**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+ * @work: the callback to run
+ * @notify: send the notification if true
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify.
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task returns from kernel
+ * mode or exits.
+ *
+ * This is like the signal handler which runs in kernel mode, but it doesn't
+ * try to wake up the @task.
+ *
+ * RETURNS:
+ * 0 on success or -ESRCH if the @task is exiting/exited.
+ */
 int
 task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
 {
@@ -21,11 +38,22 @@
 	return 0;
 }
 
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from the queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
 struct callback_head *
 task_work_cancel(struct task_struct *task, task_work_func_t func)
 {
 	struct callback_head **pprev = &task->task_works;
-	struct callback_head *work = NULL;
+	struct callback_head *work;
 	unsigned long flags;
 	/*
 	 * If cmpxchg() fails we continue without updating pprev.
@@ -35,7 +63,7 @@
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	while ((work = ACCESS_ONCE(*pprev))) {
-		read_barrier_depends();
+		smp_read_barrier_depends();
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
@@ -46,6 +74,14 @@
 	return work;
 }
 
+/**
+ * task_work_run - execute the work items added by task_work_add()
+ *
+ * Flush the pending work items. Should be used by the core kernel code.
+ * Called before the task returns to user mode or stops, or when it
+ * exits. In the latter case task_work_add() can no longer add new
+ * work after task_work_run() returns.
+ */
 void task_work_run(void)
 {
 	struct task_struct *task = current;
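
Taken together, the new kerneldoc describes a small API. A hedged usage
sketch (my_work and my_func are illustrative names) of queueing a callback
that the target task runs on its next return to user mode:

	#include <linux/task_work.h>

	static void my_func(struct callback_head *head)
	{
		/* runs in the context of the task it was queued on */
	}

	static struct callback_head my_work;

	static int queue_on(struct task_struct *task)
	{
		init_task_work(&my_work, my_func);
		/* returns -ESRCH if @task is already past task_work_run() */
		return task_work_add(task, &my_work, true);
	}
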
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a6d098c..03cf44a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1978,12 +1978,27 @@
 
 void ftrace_modify_all_code(int command)
 {
+	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+
+	/*
+	 * If the ftrace_caller calls an ftrace_ops func directly,
+	 * we need to make sure that it only traces functions it
+	 * expects to trace. When switching functions, we need to
+	 * update to the ftrace_ops_list_func first, before the
+	 * transition between the old and new calls is made, as
+	 * the ftrace_ops_list_func will check the ops hashes to
+	 * make sure the ops trace only the functions they expect.
+	 */
+	if (update)
+		ftrace_update_ftrace_func(ftrace_ops_list_func);
+
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (command & FTRACE_UPDATE_TRACE_FUNC)
+	if (update && ftrace_trace_function != ftrace_ops_list_func)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
 	if (command & FTRACE_START_FUNC_RET)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 496f94d..7974ba2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3166,11 +3166,6 @@
 };
 
 /*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_var_t tracing_cpumask;
-
-/*
  * The tracer itself will not take this lock, but still we want
  * to provide a consistent cpumask to user-space:
  */
@@ -3186,11 +3181,12 @@
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
+	struct trace_array *tr = file_inode(filp)->i_private;
 	int len;
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -3208,7 +3204,7 @@
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
+	struct trace_array *tr = file_inode(filp)->i_private;
 	cpumask_var_t tracing_cpumask_new;
 	int err, cpu;
 
@@ -3228,12 +3224,12 @@
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
 			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
 		}
-		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
 			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
@@ -3242,7 +3238,7 @@
 	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
+	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
@@ -3256,9 +3252,10 @@
 }
 
 static const struct file_operations tracing_cpumask_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.release	= tracing_release_generic_tr,
 	.llseek		= generic_file_llseek,
 };
 
@@ -5938,6 +5935,11 @@
 	if (!tr->name)
 		goto out_free_tr;
 
+	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+		goto out_free_tr;
+
+	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+
 	raw_spin_lock_init(&tr->start_lock);
 
 	tr->current_trace = &nop_trace;
@@ -5969,6 +5971,7 @@
  out_free_tr:
 	if (tr->trace_buffer.buffer)
 		ring_buffer_free(tr->trace_buffer.buffer);
+	free_cpumask_var(tr->tracing_cpumask);
 	kfree(tr->name);
 	kfree(tr);
 
@@ -6098,6 +6101,9 @@
 {
 	int cpu;
 
+	trace_create_file("tracing_cpumask", 0644, d_tracer,
+			  tr, &tracing_cpumask_fops);
+
 	trace_create_file("trace_options", 0644, d_tracer,
 			  tr, &tracing_iter_fops);
 
@@ -6147,9 +6153,6 @@
 
 	init_tracer_debugfs(&global_trace, d_tracer);
 
-	trace_create_file("tracing_cpumask", 0644, d_tracer,
-			&global_trace, &tracing_cpumask_fops);
-
 	trace_create_file("available_tracers", 0444, d_tracer,
 			&global_trace, &show_traces_fops);
 
@@ -6371,7 +6374,7 @@
 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 		goto out;
 
-	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
@@ -6386,7 +6389,7 @@
 		ring_buf_size = 1;
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
-	cpumask_copy(tracing_cpumask, cpu_all_mask);
+	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
 
 	raw_spin_lock_init(&global_trace.start_lock);
 
@@ -6441,7 +6444,7 @@
 #ifdef CONFIG_TRACER_MAX_TRACE
 	free_percpu(global_trace.max_buffer.data);
 #endif
-	free_cpumask_var(tracing_cpumask);
+	free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
 out:
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fe39acd..10c86fb 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -206,6 +206,7 @@
 	struct dentry		*event_dir;
 	struct list_head	systems;
 	struct list_head	events;
+	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
 };
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29a7ebc..368a4d5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1489,12 +1489,7 @@
 }
 
 static int
-event_create_dir(struct dentry *parent,
-		 struct ftrace_event_file *file,
-		 const struct file_operations *id,
-		 const struct file_operations *enable,
-		 const struct file_operations *filter,
-		 const struct file_operations *format)
+event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 {
 	struct ftrace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
@@ -1522,12 +1517,13 @@
 
 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, file->dir, file,
-				  enable);
+				  &ftrace_enable_fops);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (call->event.type && call->class->reg)
 		trace_create_file("id", 0444, file->dir,
-				  (void *)(long)call->event.type, id);
+				  (void *)(long)call->event.type,
+				  &ftrace_event_id_fops);
 #endif
 
 	/*
@@ -1544,10 +1540,10 @@
 		}
 	}
 	trace_create_file("filter", 0644, file->dir, call,
-			  filter);
+			  &ftrace_event_filter_fops);
 
 	trace_create_file("format", 0444, file->dir, call,
-			  format);
+			  &ftrace_event_format_fops);
 
 	return 0;
 }
@@ -1648,12 +1644,7 @@
 
 /* Add an event to a trace directory */
 static int
-__trace_add_new_event(struct ftrace_event_call *call,
-		      struct trace_array *tr,
-		      const struct file_operations *id,
-		      const struct file_operations *enable,
-		      const struct file_operations *filter,
-		      const struct file_operations *format)
+__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
 {
 	struct ftrace_event_file *file;
 
@@ -1661,7 +1652,7 @@
 	if (!file)
 		return -ENOMEM;
 
-	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
+	return event_create_dir(tr->event_dir, file);
 }
 
 /*
@@ -1683,8 +1674,7 @@
 }
 
 struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call,
-				   struct ftrace_module_file_ops *file_ops);
+static void __add_event_to_tracers(struct ftrace_event_call *call);
 
 /* Add an additional event_call dynamically */
 int trace_add_event_call(struct ftrace_event_call *call)
@@ -1695,7 +1685,7 @@
 
 	ret = __register_event(call, NULL);
 	if (ret >= 0)
-		__add_event_to_tracers(call, NULL);
+		__add_event_to_tracers(call);
 
 	mutex_unlock(&event_mutex);
 	mutex_unlock(&trace_types_lock);
@@ -1769,100 +1759,21 @@
 
 #ifdef CONFIG_MODULES
 
-static LIST_HEAD(ftrace_module_file_list);
-
-/*
- * Modules must own their file_operations to keep up with
- * reference counting.
- */
-struct ftrace_module_file_ops {
-	struct list_head		list;
-	struct module			*mod;
-	struct file_operations		id;
-	struct file_operations		enable;
-	struct file_operations		format;
-	struct file_operations		filter;
-};
-
-static struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-	/*
-	 * As event_calls are added in groups by module,
-	 * when we find one file_ops, we don't need to search for
-	 * each call in that module, as the rest should be the
-	 * same. Only search for a new one if the last one did
-	 * not match.
-	 */
-	if (file_ops && mod == file_ops->mod)
-		return file_ops;
-
-	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-		if (file_ops->mod == mod)
-			return file_ops;
-	}
-	return NULL;
-}
-
-static struct ftrace_module_file_ops *
-trace_create_file_ops(struct module *mod)
-{
-	struct ftrace_module_file_ops *file_ops;
-
-	/*
-	 * This is a bit of a PITA. To allow for correct reference
-	 * counting, modules must "own" their file_operations.
-	 * To do this, we allocate the file operations that will be
-	 * used in the event directory.
-	 */
-
-	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
-	if (!file_ops)
-		return NULL;
-
-	file_ops->mod = mod;
-
-	file_ops->id = ftrace_event_id_fops;
-	file_ops->id.owner = mod;
-
-	file_ops->enable = ftrace_enable_fops;
-	file_ops->enable.owner = mod;
-
-	file_ops->filter = ftrace_event_filter_fops;
-	file_ops->filter.owner = mod;
-
-	file_ops->format = ftrace_event_format_fops;
-	file_ops->format.owner = mod;
-
-	list_add(&file_ops->list, &ftrace_module_file_list);
-
-	return file_ops;
-}
-
 static void trace_module_add_events(struct module *mod)
 {
-	struct ftrace_module_file_ops *file_ops = NULL;
 	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
 
-	if (start == end)
-		return;
-
-	file_ops = trace_create_file_ops(mod);
-	if (!file_ops)
-		return;
-
 	for_each_event(call, start, end) {
 		__register_event(*call, mod);
-		__add_event_to_tracers(*call, file_ops);
+		__add_event_to_tracers(*call);
 	}
 }
 
 static void trace_module_remove_events(struct module *mod)
 {
-	struct ftrace_module_file_ops *file_ops;
 	struct ftrace_event_call *call, *p;
 	bool clear_trace = false;
 
@@ -1874,16 +1785,6 @@
 			__trace_remove_event_call(call);
 		}
 	}
-
-	/* Now free the file_operations */
-	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-		if (file_ops->mod == mod)
-			break;
-	}
-	if (&file_ops->list != &ftrace_module_file_list) {
-		list_del(&file_ops->list);
-		kfree(file_ops);
-	}
 	up_write(&trace_event_sem);
 
 	/*
@@ -1919,67 +1820,21 @@
 	return 0;
 }
 
-static int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-			  struct trace_array *tr,
-			  struct ftrace_module_file_ops *file_ops)
-{
-	return __trace_add_new_event(call, tr,
-				     &file_ops->id, &file_ops->enable,
-				     &file_ops->filter, &file_ops->format);
-}
-
-#else
-static inline struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-	return NULL;
-}
-static inline int trace_module_notify(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
-static inline int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-			  struct trace_array *tr,
-			  struct ftrace_module_file_ops *file_ops)
-{
-	return -ENODEV;
-}
+static struct notifier_block trace_module_nb = {
+	.notifier_call = trace_module_notify,
+	.priority = 0,
+};
 #endif /* CONFIG_MODULES */
 
 /* Create a new event directory structure for a trace directory. */
 static void
 __trace_add_event_dirs(struct trace_array *tr)
 {
-	struct ftrace_module_file_ops *file_ops = NULL;
 	struct ftrace_event_call *call;
 	int ret;
 
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (call->mod) {
-			/*
-			 * Directories for events by modules need to
-			 * keep module ref counts when opened (as we don't
-			 * want the module to disappear when reading one
-			 * of these files). The file_ops keep account of
-			 * the module ref count.
-			 */
-			file_ops = find_ftrace_file_ops(file_ops, call->mod);
-			if (!file_ops)
-				continue; /* Warn? */
-			ret = __trace_add_new_mod_event(call, tr, file_ops);
-			if (ret < 0)
-				pr_warning("Could not create directory for event %s\n",
-					   call->name);
-			continue;
-		}
-		ret = __trace_add_new_event(call, tr,
-					    &ftrace_event_id_fops,
-					    &ftrace_enable_fops,
-					    &ftrace_event_filter_fops,
-					    &ftrace_event_format_fops);
+		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
 				   call->name);
@@ -2287,11 +2142,7 @@
 
 
 	list_for_each_entry(file, &tr->events, list) {
-		ret = event_create_dir(tr->event_dir, file,
-				       &ftrace_event_id_fops,
-				       &ftrace_enable_fops,
-				       &ftrace_event_filter_fops,
-				       &ftrace_event_format_fops);
+		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
 				   file->event_call->name);
@@ -2332,29 +2183,14 @@
 		remove_event_file_dir(file);
 }
 
-static void
-__add_event_to_tracers(struct ftrace_event_call *call,
-		       struct ftrace_module_file_ops *file_ops)
+static void __add_event_to_tracers(struct ftrace_event_call *call)
 {
 	struct trace_array *tr;
 
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (file_ops)
-			__trace_add_new_mod_event(call, tr, file_ops);
-		else
-			__trace_add_new_event(call, tr,
-					      &ftrace_event_id_fops,
-					      &ftrace_enable_fops,
-					      &ftrace_event_filter_fops,
-					      &ftrace_event_format_fops);
-	}
+	list_for_each_entry(tr, &ftrace_trace_arrays, list)
+		__trace_add_new_event(call, tr);
 }
 
-static struct notifier_block trace_module_nb = {
-	.notifier_call = trace_module_notify,
-	.priority = 0,
-};
-
 extern struct ftrace_event_call *__start_ftrace_events[];
 extern struct ftrace_event_call *__stop_ftrace_events[];
 
@@ -2559,10 +2395,11 @@
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_MODULES
 	ret = register_module_notifier(&trace_module_nb);
 	if (ret)
 		pr_warning("Failed to register trace events module notifier\n");
-
+#endif
 	return 0;
 }
 early_initcall(event_trace_memsetup);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8fd0365..559329d 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -200,8 +200,8 @@
 		#type, #name, offsetof(typeof(trace), name),		\
 		sizeof(trace.name), is_signed_type(type)
 
-static
-int  __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
+static int __init
+__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 {
 	int i;
 	int pos = 0;
@@ -228,7 +228,7 @@
 	return pos;
 }
 
-static int set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
 {
 	char *print_fmt;
 	int len;
@@ -253,7 +253,7 @@
 	return 0;
 }
 
-static void free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
 {
 	struct syscall_metadata *entry = call->data;
 
@@ -459,7 +459,7 @@
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static int init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct ftrace_event_call *call)
 {
 	int id;
 	int num;
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e..630d72b 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -10,12 +10,64 @@
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 				int wait)
 {
+	unsigned long flags;
+
 	WARN_ON(cpu != 0);
 
-	local_irq_disable();
-	(func)(info);
-	local_irq_enable();
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
 
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
+
+int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
+	return 0;
+}
+EXPORT_SYMBOL(on_each_cpu);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+		      smp_call_func_t func, void *info, bool wait)
+{
+	unsigned long flags;
+
+	if (cpumask_test_cpu(0, mask)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		      smp_call_func_t func, void *info, bool wait,
+		      gfp_t gfp_flags)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	if (cond_func(0, info)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
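
On UP these helpers collapse to running func(info) locally with interrupts
saved and restored, so callers keep a single code path across SMP and UP
builds. For instance (bump and demo are illustrative):

	#include <linux/smp.h>

	static void bump(void *info)
	{
		(*(int *)info)++;
	}

	static int demo(void)
	{
		int hits = 0;

		on_each_cpu(bump, &hits, 1);	/* runs exactly once on UP */
		return hits;
	}
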
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 652bea9..c9eef36 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1461,7 +1461,7 @@
 
 config RBTREE_TEST
 	tristate "Red-Black tree test"
-	depends on m && DEBUG_KERNEL
+	depends on DEBUG_KERNEL
 	help
 	  A benchmark measuring the performance of the rbtree library.
 	  Also includes rbtree invariant checks.
diff --git a/lib/crc32.c b/lib/crc32.c
index 072fbd8..410093d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -131,11 +131,14 @@
 #endif
 
 /**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for
- *	other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
+ *			CRC32/CRC32C
+ * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for other
+ *	 uses, or the previous crc32/crc32c value if computing incrementally.
+ * @p: pointer to buffer over which CRC32/CRC32C is run
  * @len: length of buffer @p
+ * @tab: little-endian Ethernet table
+ * @polynomial: CRC32/CRC32C LE polynomial
  */
 static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
 					  size_t len, const u32 (*tab)[256],
@@ -201,11 +204,13 @@
 EXPORT_SYMBOL(__crc32c_le);
 
 /**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
  * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for
  *	other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * @p: pointer to buffer over which CRC32 is run
  * @len: length of buffer @p
+ * @tab: big-endian Ethernet table
+ * @polynomial: CRC32 BE polynomial
  */
 static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
 					  size_t len, const u32 (*tab)[256],
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 19ff89e..d619b28 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -48,7 +48,7 @@
 		out_len = 0x8000; /* 32 K */
 		out_buf = malloc(out_len);
 	} else {
-		out_len = 0x7fffffff; /* no limit */
+		out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
 	}
 	if (!out_buf) {
 		error("Out of memory while allocating output buffer");
diff --git a/lib/div64.c b/lib/div64.c
index a163b6c..4382ad7 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,6 +79,46 @@
 #endif
 
 /**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ * @remainder:  64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64,
+ * but this operation, which includes the math for calculating the
+ * remainder, is kept distinct to avoid slowing down div64_u64 on
+ * 32bit systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
+			quot++;
+			*remainder -= divisor;
+		}
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
+/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend:	64bit dividend
  * @divisor:	64bit divisor
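
The estimate-and-fix-up idea is easy to check from userspace. A standalone
sketch of the technique, not the patch verbatim: it normalizes the divisor so
its top bit lands in bit 31 (n = fls(high)), which keeps the shifted estimate
within one of the true quotient, and it stands in native 64-bit division for
div_u64:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t div64_u64_rem_sketch(uint64_t dividend, uint64_t divisor,
					     uint64_t *remainder)
	{
		uint32_t high = divisor >> 32;
		uint64_t quot;

		if (high == 0) {
			quot = dividend / divisor;
			*remainder = dividend % divisor;
		} else {
			int n = 32 - __builtin_clz(high);	/* fls(high) */

			/* the shifted estimate is at most one too large... */
			quot = (dividend >> n) / (uint32_t)(divisor >> n);
			if (quot != 0)
				quot--;		/* ...and now at most one too small */

			*remainder = dividend - quot * divisor;
			if (*remainder >= divisor) {
				quot++;
				*remainder -= divisor;
			}
		}
		return quot;
	}

	int main(void)
	{
		uint64_t rem;
		uint64_t quot = div64_u64_rem_sketch(~0ULL, (1ULL << 32) + 3, &rem);

		/* expect 4294967293 rem 8 */
		printf("%llu rem %llu\n",
		       (unsigned long long)quot, (unsigned long long)rem);
		return 0;
	}
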
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b35cfa9..26cf20b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -37,6 +37,11 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 
+static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
+{
+	return chunk->end_addr - chunk->start_addr + 1;
+}
+
 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
 {
 	unsigned long val, nval;
@@ -182,13 +187,13 @@
 	int nbytes = sizeof(struct gen_pool_chunk) +
 				BITS_TO_LONGS(nbits) * sizeof(long);
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
+	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
 	if (unlikely(chunk == NULL))
 		return -ENOMEM;
 
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
-	chunk->end_addr = virt + size;
+	chunk->end_addr = virt + size - 1;
 	atomic_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
@@ -213,7 +218,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
 			paddr = chunk->phys_addr + (addr - chunk->start_addr);
 			break;
 		}
@@ -242,7 +247,7 @@
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit = chunk_size(chunk) >> order;
 		bit = find_next_bit(chunk->bits, end_bit, 0);
 		BUG_ON(bit < end_bit);
 
@@ -283,7 +288,7 @@
 		if (size > atomic_read(&chunk->avail))
 			continue;
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit = chunk_size(chunk) >> order;
 retry:
 		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
 				pool->data);
@@ -330,8 +335,8 @@
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
-			BUG_ON(addr + size > chunk->end_addr);
+		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+			BUG_ON(addr + size - 1 > chunk->end_addr);
 			start_bit = (addr - chunk->start_addr) >> order;
 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
 			BUG_ON(remain);
@@ -400,7 +405,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-		size += chunk->end_addr - chunk->start_addr;
+		size += chunk_size(chunk);
 	rcu_read_unlock();
 	return size;
 }
@@ -519,7 +524,6 @@
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
  * @dev: device to retrieve the gen_pool from
- * @name: Optional name for the gen_pool, usually NULL
  *
  * Returns the gen_pool for the device if one is present, or NULL.
  */
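
Storing the last valid byte rather than one past it also means a chunk that
ends at the very top of the address space no longer wraps to 0. A userspace
sketch of the inclusive-bound arithmetic (the addresses are illustrative):

	#include <stdio.h>

	int main(void)
	{
		/* a 4 KiB chunk at the top of a 32-bit address space */
		unsigned long start = 0xfffff000UL;
		unsigned long size = 0x1000UL;
		unsigned long end = start + size - 1;	/* 0xffffffff: on a
							   32-bit unsigned long,
							   start + size would
							   wrap to 0 */
		unsigned long addr = 0xffffff80UL;

		printf("in chunk: %d\n", addr >= start && addr <= end);
		printf("chunk_size: %#lx\n", end - start + 1);	/* 0x1000 again */
		return 0;
	}
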
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 411be80..df6839e 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -283,8 +283,8 @@
 	return (int) (-(((char *) ip) - source));
 }
 
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
-		size_t actual_dest_len)
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+		unsigned char *dest, size_t actual_dest_len)
 {
 	int ret = -1;
 	int input_len = 0;
@@ -302,8 +302,8 @@
 EXPORT_SYMBOL(lz4_decompress);
 #endif
 
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
-		char *dest, size_t *dest_len)
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+		unsigned char *dest, size_t *dest_len)
 {
 	int ret = -1;
 	int out_len = 0;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e796429..7811ed3 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -32,6 +32,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/rcupdate.h>
+#include <linux/hardirq.h>		/* in_interrupt() */
 
 
 #ifdef __KERNEL__
@@ -207,7 +208,12 @@
 	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
 
-	if (!(gfp_mask & __GFP_WAIT)) {
+	/*
+	 * Preload code isn't irq safe and it doesn't make sense to use
+	 * preloading in interrupt context anyway, as all the allocations
+	 * have to be atomic. So just do a normal allocation when in
+	 * interrupt.
+	 */
+	if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) {
 		struct radix_tree_preload *rtp;
 
 		/*
@@ -264,7 +270,7 @@
  * To make use of this facility, the radix tree must be initialised without
  * __GFP_WAIT being passed to INIT_RADIX_TREE().
  */
-int radix_tree_preload(gfp_t gfp_mask)
+static int __radix_tree_preload(gfp_t gfp_mask)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -288,9 +294,40 @@
 out:
 	return ret;
 }
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail.  On
+ * success, return zero, with preemption disabled.  On error, return -ENOMEM
+ * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
+ */
+int radix_tree_preload(gfp_t gfp_mask)
+{
+	/* Warn on non-sensical use... */
+	WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT));
+	return __radix_tree_preload(gfp_mask);
+}
 EXPORT_SYMBOL(radix_tree_preload);
 
 /*
+ * The same as the function above, except we don't guarantee that
+ * preloading happens. We do it if we decide it helps. On success, return
+ * zero with preemption disabled. On error, return -ENOMEM with preemption
+ * not disabled.
+ */
+int radix_tree_maybe_preload(gfp_t gfp_mask)
+{
+	if (gfp_mask & __GFP_WAIT)
+		return __radix_tree_preload(gfp_mask);
+	/* Preloading doesn't help anything with this gfp mask, skip it */
+	preempt_disable();
+	return 0;
+}
+EXPORT_SYMBOL(radix_tree_maybe_preload);
+
+/*
 *	Return the maximum key which can be stored into a
  *	radix tree with height HEIGHT.
  */
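
A hedged sketch of how a caller uses the new helper, mirroring the
mm/filemap.c change below (insert_item is an illustrative name, and a real
caller would also take the tree's lock between preload and insert):

	#include <linux/radix-tree.h>
	#include <linux/gfp.h>

	static int insert_item(struct radix_tree_root *root, unsigned long index,
			       void *item, gfp_t gfp_mask)
	{
		int err;

		/* preloads only if gfp_mask allows sleeping; either way we
		 * come back with preemption disabled */
		err = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
		if (err)
			return err;

		err = radix_tree_insert(root, index, item);
		radix_tree_preload_end();	/* re-enables preemption */
		return err;
	}
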
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index b462578..c7dab06 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -6,6 +6,7 @@
 raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
 raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
 raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
+raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
 
 hostprogs-y	+= mktables
 
@@ -110,6 +111,11 @@
 $(obj)/neon8.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
+targets += tilegx8.c
+$(obj)/tilegx8.c:   UNROLL := 8
+$(obj)/tilegx8.c:   $(src)/tilegx.uc $(src)/unroll.awk FORCE
+	$(call if_changed,unroll)
+
 quiet_cmd_mktable = TABLE   $@
       cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
 
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 74e6f56..f0b1aa3 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -66,6 +66,9 @@
 	&raid6_altivec4,
 	&raid6_altivec8,
 #endif
+#if defined(CONFIG_TILEGX)
+	&raid6_tilegx8,
+#endif
 	&raid6_intx1,
 	&raid6_intx2,
 	&raid6_intx4,
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 28afa1a..29090f3 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -40,13 +40,16 @@
         OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
         CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
-        HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
+        HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
                          gcc -c -x c - >&/dev/null && \
                          rm ./-.o && echo yes)
         ifeq ($(HAS_ALTIVEC),yes)
                 OBJS += altivec1.o altivec2.o altivec4.o altivec8.o
         endif
 endif
+ifeq ($(ARCH),tilegx)
+OBJS += tilegx8.o
+endif
 
 .c.o:
 	$(CC) $(CFLAGS) -c -o $@ $<
@@ -109,11 +112,15 @@
 int32.c: int.uc ../unroll.awk
 	$(AWK) ../unroll.awk -vN=32 < int.uc > $@
 
+tilegx8.c: tilegx.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@
+
 tables.c: mktables
 	./mktables > tables.c
 
 clean:
 	rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
+	rm -f tilegx*.c
 
 spotless: clean
 	rm -f *~
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
new file mode 100644
index 0000000..e7c2945
--- /dev/null
+++ b/lib/raid6/tilegx.uc
@@ -0,0 +1,86 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ *   Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *   Copyright 2012 Tilera Corporation - All Rights Reserved
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ *   Boston MA 02111-1307, USA; either version 2 of the License, or
+ *   (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * tilegx$#.c
+ *
+ * $#-way unrolled TILE-Gx SIMD for RAID-6 math.
+ *
+ * This file is postprocessed using unroll.awk.
+ *
+ */
+
+#include <linux/raid/pq.h>
+
+/* Create 8 byte copies of constant byte */
+# define NBYTES(x) (__insn_v1addi(0, x))
+# define NSIZE  8
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline __attribute_const__ u64 SHLBYTE(u64 v)
+{
+	/* Vector One Byte Shift Left Immediate. */
+	return __insn_v1shli(v, 1);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline __attribute_const__ u64 MASK(u64 v)
+{
+	/* Vector One Byte Shift Right Signed Immediate. */
+	return __insn_v1shrsi(v, 7);
+}
+
+
+void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u64 *p, *q;
+	int d, z, z0;
+
+	u64 wd$$, wq$$, wp$$, w1$$, w2$$;
+	u64 x1d = NBYTES(0x1d);
+	u64 * z0ptr;
+
+	z0 = disks - 3;			/* Highest data disk */
+	p = (u64 *)dptr[z0+1];	/* XOR parity */
+	q = (u64 *)dptr[z0+2];	/* RS syndrome */
+
+	z0ptr = (u64 *)&dptr[z0][0];
+	for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+		wq$$ = wp$$ = *z0ptr++;
+		for ( z = z0-1 ; z >= 0 ; z-- ) {
+			wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
+			wp$$ = wp$$ ^ wd$$;
+			w2$$ = MASK(wq$$);
+			w1$$ = SHLBYTE(wq$$);
+			w2$$ = w2$$ & x1d;
+			w1$$ = w1$$ ^ w2$$;
+			wq$$ = w1$$ ^ wd$$;
+		}
+		*p++ = wp$$;
+		*q++ = wq$$;
+	}
+}
+
+const struct raid6_calls raid6_tilegx$# = {
+	raid6_tilegx$#_gen_syndrome,
+	NULL,
+	"tilegx$#",
+	0
+};
diff --git a/lib/rbtree.c b/lib/rbtree.c
index c0e31fe..65f4eff 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -518,3 +518,43 @@
 	*new = *victim;
 }
 EXPORT_SYMBOL(rb_replace_node);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+	for (;;) {
+		if (node->rb_left)
+			node = node->rb_left;
+		else if (node->rb_right)
+			node = node->rb_right;
+		else
+			return (struct rb_node *)node;
+	}
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+	const struct rb_node *parent;
+	if (!node)
+		return NULL;
+	parent = rb_parent(node);
+
+	/* If we're sitting on node, we've already seen our children */
+	if (parent && node == parent->rb_left && parent->rb_right) {
+		/* If we are the parent's left node, go to the parent's right
+		 * node then all the way down to the left */
+		return rb_left_deepest_node(parent->rb_right);
+	} else
+		/* Otherwise we are the parent's right node, and the parent
+		 * should be next */
+		return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+	if (!root->rb_node)
+		return NULL;
+
+	return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
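
Postorder visits children before their parents, so a tree can be torn down
during the walk without ever touching a freed node. A hedged sketch (my_node
and its layout are illustrative):

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct my_node {
		struct rb_node rb;
		int key;
	};

	static void free_all(struct rb_root *root)
	{
		struct rb_node *node = rb_first_postorder(root);

		while (node) {
			/* fetch the successor before freeing the node */
			struct rb_node *next = rb_next_postorder(node);

			kfree(rb_entry(node, struct my_node, rb));
			node = next;
		}
		*root = RB_ROOT;
	}
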
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 122f02f..31dd4cc 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -114,6 +114,16 @@
 	return count;
 }
 
+static void check_postorder(int nr_nodes)
+{
+	struct rb_node *rb;
+	int count = 0;
+	for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb))
+		count++;
+
+	WARN_ON_ONCE(count != nr_nodes);
+}
+
 static void check(int nr_nodes)
 {
 	struct rb_node *rb;
@@ -136,6 +146,8 @@
 
 	WARN_ON_ONCE(count != nr_nodes);
 	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
+
+	check_postorder(nr_nodes);
 }
 
 static void check_augmented(int nr_nodes)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 37d9edc..ce682f7 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -652,7 +652,7 @@
 {
 	char kbuf[] = "0\n";
 
-	if (*ppos) {
+	if (*ppos || *lenp < sizeof(kbuf)) {
 		*lenp = 0;
 		return 0;
 	}
diff --git a/mm/compaction.c b/mm/compaction.c
index 05ccb4c..c437893 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1131,6 +1131,9 @@
 		.sync = false,
 	};
 
+	if (!order)
+		return;
+
 	__compact_pgdat(pgdat, &cc);
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 731a2c2..e607728 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -469,7 +469,7 @@
 	if (error)
 		goto out;
 
-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (error == 0) {
 		page_cache_get(page);
 		page->mapping = mapping;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a92012a..963e14c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -417,7 +417,7 @@
 	unsigned long msecs;
 	int err;
 
-	err = strict_strtoul(buf, 10, &msecs);
+	err = kstrtoul(buf, 10, &msecs);
 	if (err || msecs > UINT_MAX)
 		return -EINVAL;
 
@@ -444,7 +444,7 @@
 	unsigned long msecs;
 	int err;
 
-	err = strict_strtoul(buf, 10, &msecs);
+	err = kstrtoul(buf, 10, &msecs);
 	if (err || msecs > UINT_MAX)
 		return -EINVAL;
 
@@ -470,7 +470,7 @@
 	int err;
 	unsigned long pages;
 
-	err = strict_strtoul(buf, 10, &pages);
+	err = kstrtoul(buf, 10, &pages);
 	if (err || !pages || pages > UINT_MAX)
 		return -EINVAL;
 
@@ -538,7 +538,7 @@
 	int err;
 	unsigned long max_ptes_none;
 
-	err = strict_strtoul(buf, 10, &max_ptes_none);
+	err = kstrtoul(buf, 10, &max_ptes_none);
 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
 		return -EINVAL;
 
@@ -2296,6 +2296,8 @@
 		goto out;
 
 	vma = find_vma(mm, address);
+	if (!vma)
+		goto out;
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b60f330..b49579c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -21,6 +21,7 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/page-isolation.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -33,7 +34,6 @@
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 
 int hugetlb_max_hstate __read_mostly;
@@ -48,7 +48,8 @@
 static unsigned long __initdata default_hstate_size;
 
 /*
- * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
+ * free_huge_pages, and surplus_huge_pages.
  */
 DEFINE_SPINLOCK(hugetlb_lock);
 
@@ -135,9 +136,9 @@
  *                    across the pages in a mapping.
  *
  * The region data structures are protected by a combination of the mmap_sem
- * and the hugetlb_instantion_mutex.  To access or modify a region the caller
+ * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
  * must either hold the mmap_sem for write, or the mmap_sem for read and
- * the hugetlb_instantiation mutex:
+ * the hugetlb_instantiation_mutex:
  *
  *	down_write(&mm->mmap_sem);
  * or
@@ -434,25 +435,6 @@
 	return (get_vma_private_data(vma) & flag) != 0;
 }
 
-/* Decrement the reserved pages in the hugepage pool by one */
-static void decrement_hugepage_resv_vma(struct hstate *h,
-			struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_NORESERVE)
-		return;
-
-	if (vma->vm_flags & VM_MAYSHARE) {
-		/* Shared mappings always use reserves */
-		h->resv_huge_pages--;
-	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		/*
-		 * Only the process that called mmap() has reserves for
-		 * private mappings.
-		 */
-		h->resv_huge_pages--;
-	}
-}
-
 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
@@ -462,12 +444,35 @@
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 {
+	if (vma->vm_flags & VM_NORESERVE) {
+		/*
+		 * This address is already reserved by another process (chg == 0),
+		 * so we should decrement the reserved count. Without decrementing,
+		 * the reserve count remains after releasing the inode, because the
+		 * allocated page will go into the page cache and be regarded as
+		 * coming from the reserved pool in the release step. Currently we
+		 * don't have any other way to deal with this situation
+		 * properly, so add a workaround here.
+		 */
+		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
+			return 1;
+		else
+			return 0;
+	}
+
+	/* Shared mappings always use reserves */
 	if (vma->vm_flags & VM_MAYSHARE)
 		return 1;
+
+	/*
+	 * Only the process that called mmap() has reserves for
+	 * private mappings.
+	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		return 1;
+
 	return 0;
 }
 
@@ -517,9 +522,15 @@
 {
 	struct page *page;
 
-	if (list_empty(&h->hugepage_freelists[nid]))
+	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
+		if (!is_migrate_isolate_page(page))
+			break;
+	/*
+	 * If no non-isolated free hugepage is found on the list,
+	 * the allocation fails.
+	 */
+	if (&h->hugepage_freelists[nid] == &page->lru)
 		return NULL;
-	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
 	list_move(&page->lru, &h->hugepage_activelist);
 	set_page_refcounted(page);
 	h->free_huge_pages--;
@@ -527,9 +538,19 @@
 	return page;
 }
 
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+	if (hugepages_treat_as_movable || hugepage_migration_support(h))
+		return GFP_HIGHUSER_MOVABLE;
+	else
+		return GFP_HIGHUSER;
+}
+
 static struct page *dequeue_huge_page_vma(struct hstate *h,
 				struct vm_area_struct *vma,
-				unsigned long address, int avoid_reserve)
+				unsigned long address, int avoid_reserve,
+				long chg)
 {
 	struct page *page = NULL;
 	struct mempolicy *mpol;
@@ -539,16 +560,12 @@
 	struct zoneref *z;
 	unsigned int cpuset_mems_cookie;
 
-retry_cpuset:
-	cpuset_mems_cookie = get_mems_allowed();
-	zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask, &mpol, &nodemask);
 	/*
 	 * A child process with MAP_PRIVATE mappings created by their parent
 	 * have no page reserves. This check ensures that reservations are
 	 * not "stolen". The child may still get SIGKILLed
 	 */
-	if (!vma_has_reserves(vma) &&
+	if (!vma_has_reserves(vma, chg) &&
 			h->free_huge_pages - h->resv_huge_pages == 0)
 		goto err;
 
@@ -556,13 +573,23 @@
 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
 		goto err;
 
+retry_cpuset:
+	cpuset_mems_cookie = get_mems_allowed();
+	zonelist = huge_zonelist(vma, address,
+					htlb_alloc_mask(h), &mpol, &nodemask);
+
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
-		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 			if (page) {
-				if (!avoid_reserve)
-					decrement_hugepage_resv_vma(h, vma);
+				if (avoid_reserve)
+					break;
+				if (!vma_has_reserves(vma, chg))
+					break;
+
+				SetPagePrivate(page);
+				h->resv_huge_pages--;
 				break;
 			}
 		}
@@ -574,7 +601,6 @@
 	return page;
 
 err:
-	mpol_cond_put(mpol);
 	return NULL;
 }
 
@@ -620,15 +646,20 @@
 	int nid = page_to_nid(page);
 	struct hugepage_subpool *spool =
 		(struct hugepage_subpool *)page_private(page);
+	bool restore_reserve;
 
 	set_page_private(page, 0);
 	page->mapping = NULL;
 	BUG_ON(page_count(page));
 	BUG_ON(page_mapcount(page));
+	restore_reserve = PagePrivate(page);
 
 	spin_lock(&hugetlb_lock);
 	hugetlb_cgroup_uncharge_page(hstate_index(h),
 				     pages_per_huge_page(h), page);
+	if (restore_reserve)
+		h->resv_huge_pages++;
+
 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
 		/* remove the page from active list */
 		list_del(&page->lru);
@@ -715,7 +746,7 @@
 		return NULL;
 
 	page = alloc_pages_exact_node(nid,
-		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 						__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
 	if (page) {
@@ -772,33 +803,6 @@
 	return nid;
 }
 
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
-{
-	struct page *page;
-	int start_nid;
-	int next_nid;
-	int ret = 0;
-
-	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-	next_nid = start_nid;
-
-	do {
-		page = alloc_fresh_huge_page_node(h, next_nid);
-		if (page) {
-			ret = 1;
-			break;
-		}
-		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-	} while (next_nid != start_nid);
-
-	if (ret)
-		count_vm_event(HTLB_BUDDY_PGALLOC);
-	else
-		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
-
-	return ret;
-}
-
 /*
  * helper for free_pool_huge_page() - return the previously saved
  * node ["this node"] from which to free a huge page.  Advance the
@@ -817,6 +821,40 @@
 	return nid;
 }
 
+#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
+	for (nr_nodes = nodes_weight(*mask);				\
+		nr_nodes > 0 &&						\
+		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
+		nr_nodes--)
+
+#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
+	for (nr_nodes = nodes_weight(*mask);				\
+		nr_nodes > 0 &&						\
+		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
+		nr_nodes--)
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+{
+	struct page *page;
+	int nr_nodes, node;
+	int ret = 0;
+
+	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+		page = alloc_fresh_huge_page_node(h, node);
+		if (page) {
+			ret = 1;
+			break;
+		}
+	}
+
+	if (ret)
+		count_vm_event(HTLB_BUDDY_PGALLOC);
+	else
+		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
+	return ret;
+}
+
 /*
  * Free huge page from pool from next node to free.
  * Attempt to keep persistent huge pages more or less
@@ -826,40 +864,73 @@
 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 							 bool acct_surplus)
 {
-	int start_nid;
-	int next_nid;
+	int nr_nodes, node;
 	int ret = 0;
 
-	start_nid = hstate_next_node_to_free(h, nodes_allowed);
-	next_nid = start_nid;
-
-	do {
+	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
 		/*
 		 * If we're returning unused surplus pages, only examine
 		 * nodes with surplus pages.
 		 */
-		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
-		    !list_empty(&h->hugepage_freelists[next_nid])) {
+		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
+		    !list_empty(&h->hugepage_freelists[node])) {
 			struct page *page =
-				list_entry(h->hugepage_freelists[next_nid].next,
+				list_entry(h->hugepage_freelists[node].next,
 					  struct page, lru);
 			list_del(&page->lru);
 			h->free_huge_pages--;
-			h->free_huge_pages_node[next_nid]--;
+			h->free_huge_pages_node[node]--;
 			if (acct_surplus) {
 				h->surplus_huge_pages--;
-				h->surplus_huge_pages_node[next_nid]--;
+				h->surplus_huge_pages_node[node]--;
 			}
 			update_and_free_page(h, page);
 			ret = 1;
 			break;
 		}
-		next_nid = hstate_next_node_to_free(h, nodes_allowed);
-	} while (next_nid != start_nid);
+	}
 
 	return ret;
 }
 
+/*
+ * Dissolve a given free hugepage into free buddy pages. This function does
+ * nothing for in-use (including surplus) hugepages.
+ */
+static void dissolve_free_huge_page(struct page *page)
+{
+	spin_lock(&hugetlb_lock);
+	if (PageHuge(page) && !page_count(page)) {
+		struct hstate *h = page_hstate(page);
+		int nid = page_to_nid(page);
+		list_del(&page->lru);
+		h->free_huge_pages--;
+		h->free_huge_pages_node[nid]--;
+		update_and_free_page(h, page);
+	}
+	spin_unlock(&hugetlb_lock);
+}
+
+/*
+ * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
+ * make specified memory blocks removable from the system.
+ * Note that start_pfn should be aligned with the minimum hugepage size.
+ */
+void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned int order = 8 * sizeof(void *);
+	unsigned long pfn;
+	struct hstate *h;
+
+	/* Set scan step to minimum hugepage size */
+	for_each_hstate(h)
+		if (order > huge_page_order(h))
+			order = huge_page_order(h);
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+		dissolve_free_huge_page(pfn_to_page(pfn));
+}
+
 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 {
 	struct page *page;
@@ -902,12 +973,12 @@
 	spin_unlock(&hugetlb_lock);
 
 	if (nid == NUMA_NO_NODE)
-		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+		page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
 				   __GFP_REPEAT|__GFP_NOWARN,
 				   huge_page_order(h));
 	else
 		page = alloc_pages_exact_node(nid,
-			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 
 	if (page && arch_prepare_hugepage(page)) {
@@ -944,10 +1015,11 @@
  */
 struct page *alloc_huge_page_node(struct hstate *h, int nid)
 {
-	struct page *page;
+	struct page *page = NULL;
 
 	spin_lock(&hugetlb_lock);
-	page = dequeue_huge_page_node(h, nid);
+	if (h->free_huge_pages - h->resv_huge_pages > 0)
+		page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
 	if (!page)
@@ -1035,11 +1107,8 @@
 	spin_unlock(&hugetlb_lock);
 
 	/* Free unnecessary surplus pages to the buddy allocator */
-	if (!list_empty(&surplus_list)) {
-		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
-			put_page(page);
-		}
-	}
+	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
+		put_page(page);
 	spin_lock(&hugetlb_lock);
 
 	return ret;
@@ -1106,9 +1175,9 @@
 	} else  {
 		long err;
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *reservations = vma_resv_map(vma);
+		struct resv_map *resv = vma_resv_map(vma);
 
-		err = region_chg(&reservations->regions, idx, idx + 1);
+		err = region_chg(&resv->regions, idx, idx + 1);
 		if (err < 0)
 			return err;
 		return 0;
@@ -1126,10 +1195,10 @@
 
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *reservations = vma_resv_map(vma);
+		struct resv_map *resv = vma_resv_map(vma);
 
 		/* Mark this page used in the map. */
-		region_add(&reservations->regions, idx, idx + 1);
+		region_add(&resv->regions, idx, idx + 1);
 	}
 }
 
@@ -1155,38 +1224,35 @@
 	chg = vma_needs_reservation(h, vma, addr);
 	if (chg < 0)
 		return ERR_PTR(-ENOMEM);
-	if (chg)
-		if (hugepage_subpool_get_pages(spool, chg))
+	if (chg || avoid_reserve)
+		if (hugepage_subpool_get_pages(spool, 1))
 			return ERR_PTR(-ENOSPC);
 
 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 	if (ret) {
-		hugepage_subpool_put_pages(spool, chg);
+		if (chg || avoid_reserve)
+			hugepage_subpool_put_pages(spool, 1);
 		return ERR_PTR(-ENOSPC);
 	}
 	spin_lock(&hugetlb_lock);
-	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	if (page) {
-		/* update page cgroup details */
-		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-					     h_cg, page);
-		spin_unlock(&hugetlb_lock);
-	} else {
+	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
+	if (!page) {
 		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_cgroup_uncharge_cgroup(idx,
 						       pages_per_huge_page(h),
 						       h_cg);
-			hugepage_subpool_put_pages(spool, chg);
+			if (chg || avoid_reserve)
+				hugepage_subpool_put_pages(spool, 1);
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
-		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
-		spin_unlock(&hugetlb_lock);
+		/* Fall through */
 	}
+	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+	spin_unlock(&hugetlb_lock);
 
 	set_page_private(page, (unsigned long)spool);
 
@@ -1194,17 +1260,29 @@
 	return page;
 }
 
+/*
+ * alloc_huge_page()'s wrapper which simply returns the page if allocation
+ * succeeds, otherwise NULL. This function is called from new_vma_page(),
+ * where no ERR_PTR() value is expected to be returned.
+ */
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve)
+{
+	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+	if (IS_ERR(page))
+		page = NULL;
+	return page;
+}
+
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
-	int nr_nodes = nodes_weight(node_states[N_MEMORY]);
+	int nr_nodes, node;
 
-	while (nr_nodes) {
+	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 		void *addr;
 
-		addr = __alloc_bootmem_node_nopanic(
-				NODE_DATA(hstate_next_node_to_alloc(h,
-						&node_states[N_MEMORY])),
+		addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
 				huge_page_size(h), huge_page_size(h), 0);
 
 		if (addr) {
@@ -1216,7 +1294,6 @@
 			m = addr;
 			goto found;
 		}
-		nr_nodes--;
 	}
 	return 0;
 
@@ -1355,48 +1432,28 @@
 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 				int delta)
 {
-	int start_nid, next_nid;
-	int ret = 0;
+	int nr_nodes, node;
 
 	VM_BUG_ON(delta != -1 && delta != 1);
 
-	if (delta < 0)
-		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-	else
-		start_nid = hstate_next_node_to_free(h, nodes_allowed);
-	next_nid = start_nid;
-
-	do {
-		int nid = next_nid;
-		if (delta < 0)  {
-			/*
-			 * To shrink on this node, there must be a surplus page
-			 */
-			if (!h->surplus_huge_pages_node[nid]) {
-				next_nid = hstate_next_node_to_alloc(h,
-								nodes_allowed);
-				continue;
-			}
+	if (delta < 0) {
+		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+			if (h->surplus_huge_pages_node[node])
+				goto found;
 		}
-		if (delta > 0) {
-			/*
-			 * Surplus cannot exceed the total number of pages
-			 */
-			if (h->surplus_huge_pages_node[nid] >=
-						h->nr_huge_pages_node[nid]) {
-				next_nid = hstate_next_node_to_free(h,
-								nodes_allowed);
-				continue;
-			}
+	} else {
+		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+			if (h->surplus_huge_pages_node[node] <
+					h->nr_huge_pages_node[node])
+				goto found;
 		}
+	}
+	return 0;
 
-		h->surplus_huge_pages += delta;
-		h->surplus_huge_pages_node[nid] += delta;
-		ret = 1;
-		break;
-	} while (next_nid != start_nid);
-
-	return ret;
+found:
+	h->surplus_huge_pages += delta;
+	h->surplus_huge_pages_node[node] += delta;
+	return 1;
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
@@ -1526,7 +1583,7 @@
 	struct hstate *h;
 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
-	err = strict_strtoul(buf, 10, &count);
+	err = kstrtoul(buf, 10, &count);
 	if (err)
 		goto out;
 
@@ -1617,7 +1674,7 @@
 	if (h->order >= MAX_ORDER)
 		return -EINVAL;
 
-	err = strict_strtoul(buf, 10, &input);
+	err = kstrtoul(buf, 10, &input);
 	if (err)
 		return err;
 
@@ -2068,18 +2125,6 @@
 }
 #endif /* CONFIG_NUMA */
 
-int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
-			void __user *buffer,
-			size_t *length, loff_t *ppos)
-{
-	proc_dointvec(table, write, buffer, length, ppos);
-	if (hugepages_treat_as_movable)
-		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
-	else
-		htlb_alloc_mask = GFP_HIGHUSER;
-	return 0;
-}
-
 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 			void __user *buffer,
 			size_t *length, loff_t *ppos)
@@ -2207,7 +2252,7 @@
 
 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 {
-	struct resv_map *reservations = vma_resv_map(vma);
+	struct resv_map *resv = vma_resv_map(vma);
 
 	/*
 	 * This new VMA should share its siblings reservation map if present.
@@ -2217,34 +2262,34 @@
 	 * after this open call completes.  It is therefore safe to take a
 	 * new reference here without additional locking.
 	 */
-	if (reservations)
-		kref_get(&reservations->refs);
+	if (resv)
+		kref_get(&resv->refs);
 }
 
 static void resv_map_put(struct vm_area_struct *vma)
 {
-	struct resv_map *reservations = vma_resv_map(vma);
+	struct resv_map *resv = vma_resv_map(vma);
 
-	if (!reservations)
+	if (!resv)
 		return;
-	kref_put(&reservations->refs, resv_map_release);
+	kref_put(&resv->refs, resv_map_release);
 }
 
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
 	struct hstate *h = hstate_vma(vma);
-	struct resv_map *reservations = vma_resv_map(vma);
+	struct resv_map *resv = vma_resv_map(vma);
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	unsigned long reserve;
 	unsigned long start;
 	unsigned long end;
 
-	if (reservations) {
+	if (resv) {
 		start = vma_hugecache_offset(h, vma, vma->vm_start);
 		end = vma_hugecache_offset(h, vma, vma->vm_end);
 
 		reserve = (end - start) -
-			region_count(&reservations->regions, start, end);
+			region_count(&resv->regions, start, end);
 
 		resv_map_put(vma);
 
@@ -2557,7 +2602,6 @@
 {
 	struct hstate *h = hstate_vma(vma);
 	struct page *old_page, *new_page;
-	int avoidcopy;
 	int outside_reserve = 0;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
@@ -2567,10 +2611,8 @@
 retry_avoidcopy:
 	/* If no-one else is actually using this page, avoid the copy
 	 * and just make the page writable */
-	avoidcopy = (page_mapcount(old_page) == 1);
-	if (avoidcopy) {
-		if (PageAnon(old_page))
-			page_move_anon_rmap(old_page, vma, address);
+	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
+		page_move_anon_rmap(old_page, vma, address);
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
@@ -2584,8 +2626,7 @@
 	 * at the time of fork() could consume its reserves on COW instead
 	 * of the full address range.
 	 */
-	if (!(vma->vm_flags & VM_MAYSHARE) &&
-			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
+	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 			old_page != pagecache_page)
 		outside_reserve = 1;
 
@@ -2657,6 +2698,8 @@
 	spin_lock(&mm->page_table_lock);
 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+		ClearPagePrivate(new_page);
+
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
 		set_huge_pte_at(mm, address, ptep,
@@ -2668,10 +2711,11 @@
 	}
 	spin_unlock(&mm->page_table_lock);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-	/* Caller expects lock to be held */
-	spin_lock(&mm->page_table_lock);
 	page_cache_release(new_page);
 	page_cache_release(old_page);
+
+	/* Caller expects lock to be held */
+	spin_lock(&mm->page_table_lock);
 	return 0;
 }
 
@@ -2767,6 +2811,7 @@
 					goto retry;
 				goto out;
 			}
+			ClearPagePrivate(page);
 
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
@@ -2813,8 +2858,10 @@
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
-	if (anon_rmap)
+	if (anon_rmap) {
+		ClearPagePrivate(page);
 		hugepage_add_new_anon_rmap(page, vma, address);
+	}
 	else
 		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -3431,3 +3478,45 @@
 	return ret;
 }
 #endif
+
+bool isolate_huge_page(struct page *page, struct list_head *list)
+{
+	VM_BUG_ON(!PageHead(page));
+	if (!get_page_unless_zero(page))
+		return false;
+	spin_lock(&hugetlb_lock);
+	list_move_tail(&page->lru, list);
+	spin_unlock(&hugetlb_lock);
+	return true;
+}
+
+void putback_active_hugepage(struct page *page)
+{
+	VM_BUG_ON(!PageHead(page));
+	spin_lock(&hugetlb_lock);
+	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+	spin_unlock(&hugetlb_lock);
+	put_page(page);
+}
+
+bool is_hugepage_active(struct page *page)
+{
+	VM_BUG_ON(!PageHuge(page));
+	/*
+	 * This function can be called for a tail page because the caller,
+	 * scan_movable_pages, scans through a given pfn-range which typically
+	 * covers one memory block. On systems using gigantic hugepages (1GB
+	 * on x86_64), a hugepage is larger than a memory block, and we don't
+	 * support migrating such large hugepages for now, so return false
+	 * when called for tail pages.
+	 */
+	if (PageTail(page))
+		return false;
+	/*
+	 * The refcount of a hwpoisoned hugepage is 1, but such pages are not
+	 * active, so we should return false for them.
+	 */
+	if (unlikely(PageHWPoison(page)))
+		return false;
+	return page_count(page) > 0;
+}
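
A note on the for_each_node_mask_to_alloc()/for_each_node_mask_to_free() helpers introduced above: the "|| 1" makes the node assignment unconditionally truthy, so the loop is bounded purely by nr_nodes and visits each node in the mask at most once, while the hstate keeps its round-robin position across calls. Roughly the equivalent open-coded loop (try_node() is a hypothetical stand-in for the loop body):

	int nr_nodes, node;

	for (nr_nodes = nodes_weight(*nodes_allowed); nr_nodes > 0; nr_nodes--) {
		node = hstate_next_node_to_alloc(h, nodes_allowed);
		if (try_node(node))
			break;
	}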
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 3a61efc..afc2daa 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -88,12 +88,12 @@
 	 * hardware status change, hence do not require hardware support.
 	 * They are mainly for testing hwpoison in software level.
 	 */
-	dentry = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
+	dentry = debugfs_create_file("corrupt-pfn", 0200, hwpoison_dir,
 					  NULL, &hwpoison_fops);
 	if (!dentry)
 		goto fail;
 
-	dentry = debugfs_create_file("unpoison-pfn", 0600, hwpoison_dir,
+	dentry = debugfs_create_file("unpoison-pfn", 0200, hwpoison_dir,
 				     NULL, &unpoison_fops);
 	if (!dentry)
 		goto fail;
diff --git a/mm/internal.h b/mm/internal.h
index 4390ac6..684f7aa 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -85,6 +85,8 @@
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+extern bool zone_reclaimable(struct zone *zone);
 
 /*
  * in mm/rmap.c:
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c8d7f31..e126b0e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1639,7 +1639,7 @@
 	else if (strncmp(buf, "scan=", 5) == 0) {
 		unsigned long secs;
 
-		ret = strict_strtoul(buf + 5, 0, &secs);
+		ret = kstrtoul(buf + 5, 0, &secs);
 		if (ret < 0)
 			goto out;
 		stop_scan_thread();
diff --git a/mm/ksm.c b/mm/ksm.c
index b6afe0c..0bea2b2 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2194,7 +2194,7 @@
 	unsigned long msecs;
 	int err;
 
-	err = strict_strtoul(buf, 10, &msecs);
+	err = kstrtoul(buf, 10, &msecs);
 	if (err || msecs > UINT_MAX)
 		return -EINVAL;
 
@@ -2217,7 +2217,7 @@
 	int err;
 	unsigned long nr_pages;
 
-	err = strict_strtoul(buf, 10, &nr_pages);
+	err = kstrtoul(buf, 10, &nr_pages);
 	if (err || nr_pages > UINT_MAX)
 		return -EINVAL;
 
@@ -2239,7 +2239,7 @@
 	int err;
 	unsigned long flags;
 
-	err = strict_strtoul(buf, 10, &flags);
+	err = kstrtoul(buf, 10, &flags);
 	if (err || flags > UINT_MAX)
 		return -EINVAL;
 	if (flags > KSM_RUN_UNMERGE)
diff --git a/mm/madvise.c b/mm/madvise.c
index 7055883..6975bc8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -42,11 +42,11 @@
  * We can potentially split a vm area into separate
  * areas, each area with its own behavior.
  */
-static long madvise_behavior(struct vm_area_struct * vma,
+static long madvise_behavior(struct vm_area_struct *vma,
 		     struct vm_area_struct **prev,
 		     unsigned long start, unsigned long end, int behavior)
 {
-	struct mm_struct * mm = vma->vm_mm;
+	struct mm_struct *mm = vma->vm_mm;
 	int error = 0;
 	pgoff_t pgoff;
 	unsigned long new_flags = vma->vm_flags;
@@ -215,8 +215,8 @@
 /*
  * Schedule all required I/O operations.  Do not wait for completion.
  */
-static long madvise_willneed(struct vm_area_struct * vma,
-			     struct vm_area_struct ** prev,
+static long madvise_willneed(struct vm_area_struct *vma,
+			     struct vm_area_struct **prev,
 			     unsigned long start, unsigned long end)
 {
 	struct file *file = vma->vm_file;
@@ -270,8 +270,8 @@
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed(struct vm_area_struct * vma,
-			     struct vm_area_struct ** prev,
+static long madvise_dontneed(struct vm_area_struct *vma,
+			     struct vm_area_struct **prev,
 			     unsigned long start, unsigned long end)
 {
 	*prev = vma;
@@ -343,29 +343,34 @@
  */
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
-	int ret = 0;
-
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 	for (; start < end; start += PAGE_SIZE) {
 		struct page *p;
-		int ret = get_user_pages_fast(start, 1, 0, &p);
+		int ret;
+
+		ret = get_user_pages_fast(start, 1, 0, &p);
 		if (ret != 1)
 			return ret;
+
+		if (PageHWPoison(p)) {
+			put_page(p);
+			continue;
+		}
 		if (bhv == MADV_SOFT_OFFLINE) {
-			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
+			pr_info("Soft offlining page %#lx at %#lx\n",
 				page_to_pfn(p), start);
 			ret = soft_offline_page(p, MF_COUNT_INCREASED);
 			if (ret)
-				break;
+				return ret;
 			continue;
 		}
-		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
+		pr_info("Injecting memory failure for page %#lx at %#lx\n",
 		       page_to_pfn(p), start);
 		/* Ignore return value for now */
 		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
 	}
-	return ret;
+	return 0;
 }
 #endif
 
@@ -459,7 +464,7 @@
 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
 {
 	unsigned long end, tmp;
-	struct vm_area_struct * vma, *prev;
+	struct vm_area_struct *vma, *prev;
 	int unmapped_error = 0;
 	int error = -EINVAL;
 	int write;
diff --git a/mm/memblock.c b/mm/memblock.c
index a847bfe6..0ac412a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -914,6 +914,24 @@
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
+			 unsigned long *start_pfn, unsigned long *end_pfn)
+{
+	struct memblock_type *type = &memblock.memory;
+	int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);
+
+	if (mid == -1)
+		return -1;
+
+	*start_pfn = type->regions[mid].base >> PAGE_SHIFT;
+	*end_pfn = (type->regions[mid].base + type->regions[mid].size)
+			>> PAGE_SHIFT;
+
+	return type->regions[mid].nid;
+}
+#endif
+
 /**
  * memblock_is_region_memory - check if a region is a subset of memory
  * @base: base of region to check
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3b83957..c6bd28e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3121,7 +3121,7 @@
 		ssize_t size = memcg_caches_array_size(num_groups);
 
 		size *= sizeof(void *);
-		size += sizeof(struct memcg_cache_params);
+		size += offsetof(struct memcg_cache_params, memcg_caches);
 
 		s->memcg_params = kzalloc(size, GFP_KERNEL);
 		if (!s->memcg_params) {
@@ -3164,13 +3164,16 @@
 int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 			 struct kmem_cache *root_cache)
 {
-	size_t size = sizeof(struct memcg_cache_params);
+	size_t size;
 
 	if (!memcg_kmem_enabled())
 		return 0;
 
-	if (!memcg)
+	if (!memcg) {
+		size = offsetof(struct memcg_cache_params, memcg_caches);
 		size += memcg_limited_groups_array_size * sizeof(void *);
+	} else
+		size = sizeof(struct memcg_cache_params);
 
 	s->memcg_params = kzalloc(size, GFP_KERNEL);
 	if (!s->memcg_params)
@@ -5588,7 +5591,13 @@
 	const struct mem_cgroup_threshold *_a = a;
 	const struct mem_cgroup_threshold *_b = b;
 
-	return _a->threshold - _b->threshold;
+	if (_a->threshold > _b->threshold)
+		return 1;
+
+	if (_a->threshold < _b->threshold)
+		return -1;
+
+	return 0;
 }
 
 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
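
The compare_thresholds() change above exists because the thresholds are u64: returning "_a->threshold - _b->threshold" both risks unsigned wraparound and truncates the 64-bit difference to the comparator's int return type, which can flip or erase the sign. A standalone illustration of the failure mode (not from the patch):

	/*
	 * With a = 0x100000001ULL and b = 1ULL, (int)(a - b) truncates
	 * 0x100000000 to 0, reporting "equal" although a > b. The
	 * explicit three-way form below cannot wrap or truncate.
	 */
	static int cmp_u64(const void *x, const void *y)
	{
		u64 a = *(const u64 *)x;
		u64 b = *(const u64 *)y;

		if (a > b)
			return 1;
		if (a < b)
			return -1;
		return 0;
	}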
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d84c5e5..d472e14 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -206,7 +206,7 @@
 #ifdef __ARCH_SI_TRAPNO
 	si.si_trapno = trapno;
 #endif
-	si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
+	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
 
 	if ((flags & MF_ACTION_REQUIRED) && t == current) {
 		si.si_code = BUS_MCEERR_AR;
@@ -983,7 +983,7 @@
 static void set_page_hwpoison_huge_page(struct page *hpage)
 {
 	int i;
-	int nr_pages = 1 << compound_trans_order(hpage);
+	int nr_pages = 1 << compound_order(hpage);
 	for (i = 0; i < nr_pages; i++)
 		SetPageHWPoison(hpage + i);
 }
@@ -991,7 +991,7 @@
 static void clear_page_hwpoison_huge_page(struct page *hpage)
 {
 	int i;
-	int nr_pages = 1 << compound_trans_order(hpage);
+	int nr_pages = 1 << compound_order(hpage);
 	for (i = 0; i < nr_pages; i++)
 		ClearPageHWPoison(hpage + i);
 }
@@ -1204,6 +1204,9 @@
 	for (ps = error_states;; ps++)
 		if ((p->flags & ps->mask) == ps->res)
 			break;
+
+	page_flags |= (p->flags & (1UL << PG_dirty));
+
 	if (!ps->mask)
 		for (ps = error_states;; ps++)
 			if ((page_flags & ps->mask) == ps->res)
@@ -1339,7 +1342,17 @@
 		return 0;
 	}
 
-	nr_pages = 1 << compound_trans_order(page);
+	/*
+	 * unpoison_memory() can encounter thp only when the thp is being
+	 * worked on by memory_failure() and the page lock is not held yet.
+	 * In such a case, we yield to memory_failure() and make unpoison fail.
+	 */
+	if (PageTransHuge(page)) {
+		pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
+		return 0;
+	}
+
+	nr_pages = 1 << compound_order(page);
 
 	if (!get_page_unless_zero(page)) {
 		/*
@@ -1353,7 +1366,7 @@
 			return 0;
 		}
 		if (TestClearPageHWPoison(p))
-			atomic_long_sub(nr_pages, &num_poisoned_pages);
+			atomic_long_dec(&num_poisoned_pages);
 		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
 		return 0;
 	}
@@ -1375,7 +1388,7 @@
 	unlock_page(page);
 
 	put_page(page);
-	if (freeit)
+	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
 		put_page(page);
 
 	return 0;
@@ -1416,7 +1429,8 @@
 	 * was free. This flag should be kept set until the source page
 	 * is freed and PG_hwpoison on it is set.
 	 */
-	set_migratetype_isolate(p, true);
+	if (get_pageblock_migratetype(p) != MIGRATE_ISOLATE)
+		set_migratetype_isolate(p, true);
 	/*
 	 * When the target page is a free hugepage, just remove it
 	 * from free hugepage list.
@@ -1470,6 +1484,7 @@
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
 	struct page *hpage = compound_head(page);
+	LIST_HEAD(pagelist);
 
 	/*
 	 * This double-check of PageHWPoison is to avoid the race with
@@ -1485,86 +1500,29 @@
 	unlock_page(hpage);
 
 	/* Keep page count to indicate a given hugepage is isolated. */
-	ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL,
-				MIGRATE_SYNC);
-	put_page(hpage);
+	list_move(&hpage->lru, &pagelist);
+	ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+				MIGRATE_SYNC, MR_MEMORY_FAILURE);
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 			pfn, ret, page->flags);
+		/*
+		 * We know that soft_offline_huge_page() tries to migrate
+		 * only one hugepage pointed to by hpage, so we need not
+		 * run through the pagelist here.
+		 */
+		putback_active_hugepage(hpage);
+		if (ret > 0)
+			ret = -EIO;
 	} else {
 		set_page_hwpoison_huge_page(hpage);
 		dequeue_hwpoisoned_huge_page(hpage);
-		atomic_long_add(1 << compound_trans_order(hpage),
+		atomic_long_add(1 << compound_order(hpage),
 				&num_poisoned_pages);
 	}
 	return ret;
 }
 
-static int __soft_offline_page(struct page *page, int flags);
-
-/**
- * soft_offline_page - Soft offline a page.
- * @page: page to offline
- * @flags: flags. Same as memory_failure().
- *
- * Returns 0 on success, otherwise negated errno.
- *
- * Soft offline a page, by migration or invalidation,
- * without killing anything. This is for the case when
- * a page is not corrupted yet (so it's still valid to access),
- * but has had a number of corrected errors and is better taken
- * out.
- *
- * The actual policy on when to do that is maintained by
- * user space.
- *
- * This should never impact any application or cause data loss,
- * however it might take some time.
- *
- * This is not a 100% solution for all memory, but tries to be
- * ``good enough'' for the majority of memory.
- */
-int soft_offline_page(struct page *page, int flags)
-{
-	int ret;
-	unsigned long pfn = page_to_pfn(page);
-	struct page *hpage = compound_trans_head(page);
-
-	if (PageHWPoison(page)) {
-		pr_info("soft offline: %#lx page already poisoned\n", pfn);
-		return -EBUSY;
-	}
-	if (!PageHuge(page) && PageTransHuge(hpage)) {
-		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
-			pr_info("soft offline: %#lx: failed to split THP\n",
-				pfn);
-			return -EBUSY;
-		}
-	}
-
-	ret = get_any_page(page, pfn, flags);
-	if (ret < 0)
-		return ret;
-	if (ret) { /* for in-use pages */
-		if (PageHuge(page))
-			ret = soft_offline_huge_page(page, flags);
-		else
-			ret = __soft_offline_page(page, flags);
-	} else { /* for free pages */
-		if (PageHuge(page)) {
-			set_page_hwpoison_huge_page(hpage);
-			dequeue_hwpoisoned_huge_page(hpage);
-			atomic_long_add(1 << compound_trans_order(hpage),
-					&num_poisoned_pages);
-		} else {
-			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
-		}
-	}
-	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
-	return ret;
-}
-
 static int __soft_offline_page(struct page *page, int flags)
 {
 	int ret;
@@ -1651,3 +1609,67 @@
 	}
 	return ret;
 }
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+	int ret;
+	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_trans_head(page);
+
+	if (PageHWPoison(page)) {
+		pr_info("soft offline: %#lx page already poisoned\n", pfn);
+		return -EBUSY;
+	}
+	if (!PageHuge(page) && PageTransHuge(hpage)) {
+		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+			pr_info("soft offline: %#lx: failed to split THP\n",
+				pfn);
+			return -EBUSY;
+		}
+	}
+
+	ret = get_any_page(page, pfn, flags);
+	if (ret < 0)
+		goto unset;
+	if (ret) { /* for in-use pages */
+		if (PageHuge(page))
+			ret = soft_offline_huge_page(page, flags);
+		else
+			ret = __soft_offline_page(page, flags);
+	} else { /* for free pages */
+		if (PageHuge(page)) {
+			set_page_hwpoison_huge_page(hpage);
+			dequeue_hwpoisoned_huge_page(hpage);
+			atomic_long_add(1 << compound_order(hpage),
+					&num_poisoned_pages);
+		} else {
+			SetPageHWPoison(page);
+			atomic_long_inc(&num_poisoned_pages);
+		}
+	}
+unset:
+	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
+	return ret;
+}
diff --git a/mm/memory.c b/mm/memory.c
index b3c6bf9..2b73dbd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -373,30 +373,6 @@
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
 /*
- * If a p?d_bad entry is found while walking page tables, report
- * the error, before resetting entry to p?d_none.  Usually (but
- * very seldom) called out from the p?d_none_or_clear_bad macros.
- */
-
-void pgd_clear_bad(pgd_t *pgd)
-{
-	pgd_ERROR(*pgd);
-	pgd_clear(pgd);
-}
-
-void pud_clear_bad(pud_t *pud)
-{
-	pud_ERROR(*pud);
-	pud_clear(pud);
-}
-
-void pmd_clear_bad(pmd_t *pmd)
-{
-	pmd_ERROR(*pmd);
-	pmd_clear(pmd);
-}
-
-/*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
@@ -1505,7 +1481,8 @@
 	if (pud_none(*pud))
 		goto no_page_table;
 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-		BUG_ON(flags & FOLL_GET);
+		if (flags & FOLL_GET)
+			goto out;
 		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
 		goto out;
 	}
@@ -1516,8 +1493,20 @@
 	if (pmd_none(*pmd))
 		goto no_page_table;
 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
-		BUG_ON(flags & FOLL_GET);
 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+		if (flags & FOLL_GET) {
+			/*
+			 * Refcount on tail pages are not well-defined and
+			 * shouldn't be taken. The caller should handle a NULL
+			 * return when trying to follow tail pages.
+			 */
+			if (PageHead(page))
+				get_page(page);
+			else {
+				page = NULL;
+				goto out;
+			}
+		}
 		goto out;
 	}
 	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ca1dd3a..0eb1a1d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -30,6 +30,7 @@
 #include <linux/mm_inline.h>
 #include <linux/firmware-map.h>
 #include <linux/stop_machine.h>
+#include <linux/hugetlb.h>
 
 #include <asm/tlbflush.h>
 
@@ -194,7 +195,7 @@
 
 	zone = &pgdat->node_zones[0];
 	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
-		if (zone->wait_table) {
+		if (zone_is_initialized(zone)) {
 			nr_pages = zone->wait_table_hash_nr_entries
 				* sizeof(wait_queue_head_t);
 			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
@@ -229,8 +230,8 @@
 
 	zone_span_writelock(zone);
 
-	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
+	old_zone_end_pfn = zone_end_pfn(zone);
+	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
 		zone->zone_start_pfn = start_pfn;
 
 	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
@@ -305,7 +306,7 @@
 		goto out_fail;
 
 	/* use start_pfn for z1's start_pfn if z1 is empty */
-	if (z1->spanned_pages)
+	if (!zone_is_empty(z1))
 		z1_start_pfn = z1->zone_start_pfn;
 	else
 		z1_start_pfn = start_pfn;
@@ -347,7 +348,7 @@
 		goto out_fail;
 
 	/* use end_pfn for z2's end_pfn if z2 is empty */
-	if (z2->spanned_pages)
+	if (!zone_is_empty(z2))
 		z2_end_pfn = zone_end_pfn(z2);
 	else
 		z2_end_pfn = end_pfn;
@@ -514,8 +515,9 @@
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 			     unsigned long end_pfn)
 {
-	unsigned long zone_start_pfn =  zone->zone_start_pfn;
-	unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long zone_start_pfn = zone->zone_start_pfn;
+	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
+	unsigned long zone_end_pfn = z;
 	unsigned long pfn;
 	struct mem_section *ms;
 	int nid = zone_to_nid(zone);
@@ -1069,6 +1071,23 @@
 	return ret;
 }
 
+static int check_hotplug_memory_range(u64 start, u64 size)
+{
+	u64 start_pfn = start >> PAGE_SHIFT;
+	u64 nr_pages = size >> PAGE_SHIFT;
+
+	/* Memory range must be aligned with section */
+	if ((start_pfn & ~PAGE_SECTION_MASK) ||
+	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
+		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
+				(unsigned long long)start,
+				(unsigned long long)size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 int __ref add_memory(int nid, u64 start, u64 size)
 {
@@ -1078,6 +1097,10 @@
 	struct resource *res;
 	int ret;
 
+	ret = check_hotplug_memory_range(start, size);
+	if (ret)
+		return ret;
+
 	lock_memory_hotplug();
 
 	res = register_memory_resource(start, size);
@@ -1208,10 +1231,12 @@
 }
 
 /*
- * Scanning pfn is much easier than scanning lru list.
- * Scan pfn from start to end and Find LRU page.
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
+ * and hugepages). We scan by pfn because it's much easier than walking a
+ * linked list. This function returns the pfn of the first movable page
+ * found, otherwise 0.
  */
-static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
+static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 	struct page *page;
@@ -1220,6 +1245,13 @@
 			page = pfn_to_page(pfn);
 			if (PageLRU(page))
 				return pfn;
+			if (PageHuge(page)) {
+				if (is_hugepage_active(page))
+					return pfn;
+				else
+					pfn = round_up(pfn + 1,
+						1 << compound_order(page)) - 1;
+			}
 		}
 	}
 	return 0;
@@ -1240,6 +1272,19 @@
 		if (!pfn_valid(pfn))
 			continue;
 		page = pfn_to_page(pfn);
+
+		if (PageHuge(page)) {
+			struct page *head = compound_head(page);
+			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+			if (compound_order(head) > PFN_SECTION_SHIFT) {
+				ret = -EBUSY;
+				break;
+			}
+			if (isolate_huge_page(page, &source))
+				move_pages -= 1 << compound_order(head);
+			continue;
+		}
+
 		if (!get_page_unless_zero(page))
 			continue;
 		/*
@@ -1272,7 +1317,7 @@
 	}
 	if (!list_empty(&source)) {
 		if (not_managed) {
-			putback_lru_pages(&source);
+			putback_movable_pages(&source);
 			goto out;
 		}
 
@@ -1283,7 +1328,7 @@
 		ret = migrate_pages(&source, alloc_migrate_target, 0,
 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 		if (ret)
-			putback_lru_pages(&source);
+			putback_movable_pages(&source);
 	}
 out:
 	return ret;
@@ -1472,7 +1517,6 @@
 	struct zone *zone;
 	struct memory_notify arg;
 
-	BUG_ON(start_pfn >= end_pfn);
 	/* at least, alignment against pageblock is necessary */
 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
 		return -EINVAL;
@@ -1527,8 +1571,8 @@
 		drain_all_pages();
 	}
 
-	pfn = scan_lru_pages(start_pfn, end_pfn);
-	if (pfn) { /* We have page on LRU */
+	pfn = scan_movable_pages(start_pfn, end_pfn);
+	if (pfn) { /* We have movable pages */
 		ret = do_migrate_range(pfn, end_pfn);
 		if (!ret) {
 			drain = 1;
@@ -1547,6 +1591,11 @@
 	yield();
 	/* drain pcp pages, this is synchronous. */
 	drain_all_pages();
+	/*
+	 * Dissolve free hugepages in the memory block before actually
+	 * offlining, to keep hugetlbfs's object counting consistent.
+	 */
+	dissolve_free_huge_pages(start_pfn, end_pfn);
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {
@@ -1674,9 +1723,8 @@
 	return ret;
 }
 
-static int check_cpu_on_node(void *data)
+static int check_cpu_on_node(pg_data_t *pgdat)
 {
-	struct pglist_data *pgdat = data;
 	int cpu;
 
 	for_each_present_cpu(cpu) {
@@ -1691,10 +1739,9 @@
 	return 0;
 }
 
-static void unmap_cpu_on_node(void *data)
+static void unmap_cpu_on_node(pg_data_t *pgdat)
 {
 #ifdef CONFIG_ACPI_NUMA
-	struct pglist_data *pgdat = data;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
@@ -1703,10 +1750,11 @@
 #endif
 }
 
-static int check_and_unmap_cpu_on_node(void *data)
+static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
 {
-	int ret = check_cpu_on_node(data);
+	int ret;
 
+	ret = check_cpu_on_node(pgdat);
 	if (ret)
 		return ret;
 
@@ -1715,11 +1763,18 @@
 	 * the cpu_to_node() now.
 	 */
 
-	unmap_cpu_on_node(data);
+	unmap_cpu_on_node(pgdat);
 	return 0;
 }
 
-/* offline the node if all memory sections of this node are removed */
+/**
+ * try_offline_node
+ *
+ * Offline a node if all memory sections and cpus of the node are removed.
+ *
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations before this call.
+ */
 void try_offline_node(int nid)
 {
 	pg_data_t *pgdat = NODE_DATA(nid);
@@ -1745,7 +1800,7 @@
 		return;
 	}
 
-	if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL))
+	if (check_and_unmap_cpu_on_node(pgdat))
 		return;
 
 	/*
@@ -1782,10 +1837,19 @@
 }
 EXPORT_SYMBOL(try_offline_node);
 
+/**
+ * remove_memory
+ *
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations before this call, as required by
+ * try_offline_node().
+ */
 void __ref remove_memory(int nid, u64 start, u64 size)
 {
 	int ret;
 
+	BUG_ON(check_hotplug_memory_range(start, size));
+
 	lock_memory_hotplug();
 
 	/*
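
For intuition about check_hotplug_memory_range() above, here is a small userspace model of the same test; the 4KB-page and 128MB-section constants are assumptions for illustration, since both depend on the architecture and configuration:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12			/* assumed 4KB pages */
	#define PAGES_PER_SECTION	(1UL << 15)		/* assumed 128MB sections */
	#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

	static bool hotplug_range_ok(uint64_t start, uint64_t size)
	{
		uint64_t start_pfn = start >> PAGE_SHIFT;
		uint64_t nr_pages = size >> PAGE_SHIFT;

		return !(start_pfn & ~PAGE_SECTION_MASK) &&
		       !(nr_pages % PAGES_PER_SECTION) && nr_pages;
	}

	int main(void)
	{
		/* 128MB at a 128MB offset passes; the same block at 129MB fails. */
		printf("%d %d\n",
		       hotplug_range_ok(128ULL << 20, 128ULL << 20),
		       hotplug_range_ok(129ULL << 20, 128ULL << 20));
		return 0;
	}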
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4baf12e..0472964 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -123,16 +123,19 @@
 static struct mempolicy *get_task_policy(struct task_struct *p)
 {
 	struct mempolicy *pol = p->mempolicy;
-	int node;
 
 	if (!pol) {
-		node = numa_node_id();
-		if (node != NUMA_NO_NODE)
-			pol = &preferred_node_policy[node];
+		int node = numa_node_id();
 
-		/* preferred_node_policy is not initialised early in boot */
-		if (!pol->mode)
-			pol = NULL;
+		if (node != NUMA_NO_NODE) {
+			pol = &preferred_node_policy[node];
+			/*
+			 * preferred_node_policy is not initialised early in
+			 * boot
+			 */
+			if (!pol->mode)
+				pol = NULL;
+		}
 	}
 
 	return pol;
@@ -473,8 +476,11 @@
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
-/* Scan through pages checking if pages follow certain conditions. */
-static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+/*
+ * Scan through pages checking if pages follow certain conditions,
+ * and move them to the pagelist if they do.
+ */
+static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -512,7 +518,31 @@
 	return addr != end;
 }
 
-static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
+		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
+				    void *private)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	int nid;
+	struct page *page;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	page = pte_page(huge_ptep_get((pte_t *)pmd));
+	nid = page_to_nid(page);
+	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+		goto unlock;
+	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+	if (flags & (MPOL_MF_MOVE_ALL) ||
+	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+		isolate_huge_page(page, private);
+unlock:
+	spin_unlock(&vma->vm_mm->page_table_lock);
+#else
+	BUG();
+#endif
+}
+
+static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -523,17 +553,24 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (!pmd_present(*pmd))
+			continue;
+		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
+			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
+						flags, private);
+			continue;
+		}
 		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
-		if (check_pte_range(vma, pmd, addr, next, nodes,
+		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -544,16 +581,18 @@
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
+		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
+			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		if (check_pmd_range(vma, pud, addr, next, nodes,
+		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int check_pgd_range(struct vm_area_struct *vma,
+static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -566,7 +605,7 @@
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		if (check_pud_range(vma, pgd, addr, next, nodes,
+		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pgd++, addr = next, addr != end);
@@ -604,12 +643,14 @@
 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
 
 /*
- * Check if all pages in a range are on a set of nodes.
- * If pagelist != NULL then isolate pages from the LRU and
- * put them on the pagelist.
+ * Walk through page tables and collect pages to be migrated.
+ *
+ * If the pages found in a given range are on the set of nodes determined
+ * by @nodes and @flags, they are isolated and queued to the pagelist
+ * passed via @private.
  */
 static struct vm_area_struct *
-check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
 	int err;
@@ -635,9 +676,6 @@
 				return ERR_PTR(-EFAULT);
 		}
 
-		if (is_vm_hugetlb_page(vma))
-			goto next;
-
 		if (flags & MPOL_MF_LAZY) {
 			change_prot_numa(vma, start, endvma);
 			goto next;
@@ -647,7 +685,7 @@
 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 		      vma_migratable(vma))) {
 
-			err = check_pgd_range(vma, start, endvma, nodes,
+			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 						flags, private);
 			if (err) {
 				first = ERR_PTR(err);
@@ -990,7 +1028,11 @@
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+	if (PageHuge(page))
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					node);
+	else
+		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
@@ -1013,14 +1055,14 @@
 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
 	 */
 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
-	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_node_page, dest,
 					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	return err;
@@ -1154,10 +1196,14 @@
 			break;
 		vma = vma->vm_next;
 	}
-
 	/*
-	 * if !vma, alloc_page_vma() will use task or system default policy
+	 * queue_pages_range() confirms that @page belongs to some vma,
+	 * so vma shouldn't be NULL.
 	 */
+	BUG_ON(!vma);
+
+	if (PageHuge(page))
+		return alloc_huge_page_noerr(vma, address, 1);
 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 }
 #else
@@ -1249,7 +1295,7 @@
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	vma = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
 	err = PTR_ERR(vma);	/* maybe ... */
@@ -1265,7 +1311,7 @@
 					(unsigned long)vma,
 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
-				putback_lru_pages(&pagelist);
+				putback_movable_pages(&pagelist);
 		}
 
 		if (nr_failed && (flags & MPOL_MF_STRICT))
@@ -2065,6 +2111,16 @@
 }
 EXPORT_SYMBOL(alloc_pages_current);
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	struct mempolicy *pol = mpol_dup(vma_policy(src));
+
+	if (IS_ERR(pol))
+		return PTR_ERR(pol);
+	dst->vm_policy = pol;
+	return 0;
+}
+
 /*
  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
diff --git a/mm/mempool.c b/mm/mempool.c
index 5499047..659aa42 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -73,7 +73,7 @@
 			       gfp_t gfp_mask, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
+	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
 	if (!pool)
 		return NULL;
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c244..b7ded7e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,6 +100,10 @@
 	struct page *page2;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
+		if (unlikely(PageHuge(page))) {
+			putback_active_hugepage(page);
+			continue;
+		}
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
@@ -945,6 +949,16 @@
 	struct page *new_hpage = get_new_page(hpage, private, &result);
 	struct anon_vma *anon_vma = NULL;
 
+	/*
+	 * Movability of hugepages depends on the architecture and the
+	 * hugepage size. This check is necessary because some callers of
+	 * hugepage migration, like soft offline and memory hot-remove, don't
+	 * walk through page tables or check whether the hugepage is
+	 * pmd-based before kicking off migration.
+	 */
+	if (!hugepage_migration_support(page_hstate(hpage)))
+		return -ENOSYS;
+
 	if (!new_hpage)
 		return -ENOMEM;
 
@@ -975,6 +989,8 @@
 
 	unlock_page(hpage);
 out:
+	if (rc != -EAGAIN)
+		putback_active_hugepage(hpage);
 	put_page(new_hpage);
 	if (result) {
 		if (rc)
@@ -1025,7 +1041,11 @@
 		list_for_each_entry_safe(page, page2, from, lru) {
 			cond_resched();
 
-			rc = unmap_and_move(get_new_page, private,
+			if (PageHuge(page))
+				rc = unmap_and_move_huge_page(get_new_page,
+						private, page, pass > 2, mode);
+			else
+				rc = unmap_and_move(get_new_page, private,
 						page, pass > 2, mode);
 
 			switch(rc) {
@@ -1058,32 +1078,6 @@
 	return rc;
 }
 
-int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
-		      unsigned long private, enum migrate_mode mode)
-{
-	int pass, rc;
-
-	for (pass = 0; pass < 10; pass++) {
-		rc = unmap_and_move_huge_page(get_new_page, private,
-						hpage, pass > 2, mode);
-		switch (rc) {
-		case -ENOMEM:
-			goto out;
-		case -EAGAIN:
-			/* try again */
-			cond_resched();
-			break;
-		case MIGRATEPAGE_SUCCESS:
-			goto out;
-		default:
-			rc = -EIO;
-			goto out;
-		}
-	}
-out:
-	return rc;
-}
-
 #ifdef CONFIG_NUMA
 /*
  * Move a list of individual pages
@@ -1108,7 +1102,11 @@
 
 	*result = &pm->status;
 
-	return alloc_pages_exact_node(pm->node,
+	if (PageHuge(p))
+		return alloc_huge_page_node(page_hstate(compound_head(p)),
+					pm->node);
+	else
+		return alloc_pages_exact_node(pm->node,
 				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
@@ -1168,6 +1166,11 @@
 				!migrate_all)
 			goto put_and_set;
 
+		if (PageHuge(page)) {
+			isolate_huge_page(page, &pagelist);
+			goto put_and_set;
+		}
+
 		err = isolate_lru_page(page);
 		if (!err) {
 			list_add_tail(&page->lru, &pagelist);
@@ -1190,7 +1193,7 @@
 		err = migrate_pages(&pagelist, new_page_node,
 				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	up_read(&mm->mmap_sem);
@@ -1468,7 +1471,7 @@
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone->all_unreclaimable)
+		if (!zone_reclaimable(zone))
 			continue;
 
 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
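
With migrate_huge_page() gone, hugepages travel through the regular
migrate_pages() loop and only the isolation step differs per page type. A
hedged sketch of the resulting caller shape, modeled on the sys_move_pages
path above (new_page_node and pm are that path's callback and context):

	LIST_HEAD(pagelist);

	if (PageHuge(page)) {
		isolate_huge_page(page, &pagelist);
	} else if (!isolate_lru_page(page)) {
		list_add_tail(&page->lru, &pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
	}

	err = migrate_pages(&pagelist, new_page_node, (unsigned long)pm,
			    MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(&pagelist);	/* now hugepage-aware */
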
diff --git a/mm/mlock.c b/mm/mlock.c
index 79b7cf7..d638026 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -11,6 +11,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/pagemap.h>
+#include <linux/pagevec.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
 #include <linux/sched.h>
@@ -18,6 +19,8 @@
 #include <linux/rmap.h>
 #include <linux/mmzone.h>
 #include <linux/hugetlb.h>
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
 
 #include "internal.h"
 
@@ -87,6 +90,47 @@
 	}
 }
 
+/*
+ * Finish munlock after successful page isolation
+ *
+ * Page must be locked. This is a wrapper for try_to_munlock()
+ * and putback_lru_page() with munlock accounting.
+ */
+static void __munlock_isolated_page(struct page *page)
+{
+	int ret = SWAP_AGAIN;
+
+	/*
+	 * Optimization: if the page was mapped just once, that's our mapping
+	 * and we don't need to check all the other vmas.
+	 */
+	if (page_mapcount(page) > 1)
+		ret = try_to_munlock(page);
+
+	/* Did try_to_munlock() succeed or punt? */
+	if (ret != SWAP_MLOCK)
+		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+
+	putback_lru_page(page);
+}
+
+/*
+ * Accounting for page isolation fail during munlock
+ *
+ * Performs accounting when page isolation fails in munlock. There is nothing
+ * else to do because it means some other task has already removed the page
+ * from the LRU. putback_lru_page() will take care of removing the page from
+ * the unevictable list, if necessary. vmscan [page_referenced()] will move
+ * the page back to the unevictable list if some other vma has it mlocked.
+ */
+static void __munlock_isolation_failed(struct page *page)
+{
+	if (PageUnevictable(page))
+		count_vm_event(UNEVICTABLE_PGSTRANDED);
+	else
+		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+}
+
 /**
  * munlock_vma_page - munlock a vma page
  * @page - page to be unlocked
@@ -112,37 +156,10 @@
 		unsigned int nr_pages = hpage_nr_pages(page);
 		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 		page_mask = nr_pages - 1;
-		if (!isolate_lru_page(page)) {
-			int ret = SWAP_AGAIN;
-
-			/*
-			 * Optimization: if the page was mapped just once,
-			 * that's our mapping and we don't need to check all the
-			 * other vmas.
-			 */
-			if (page_mapcount(page) > 1)
-				ret = try_to_munlock(page);
-			/*
-			 * did try_to_unlock() succeed or punt?
-			 */
-			if (ret != SWAP_MLOCK)
-				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
-
-			putback_lru_page(page);
-		} else {
-			/*
-			 * Some other task has removed the page from the LRU.
-			 * putback_lru_page() will take care of removing the
-			 * page from the unevictable list, if necessary.
-			 * vmscan [page_referenced()] will move the page back
-			 * to the unevictable list if some other vma has it
-			 * mlocked.
-			 */
-			if (PageUnevictable(page))
-				count_vm_event(UNEVICTABLE_PGSTRANDED);
-			else
-				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
-		}
+		if (!isolate_lru_page(page))
+			__munlock_isolated_page(page);
+		else
+			__munlock_isolation_failed(page);
 	}
 
 	return page_mask;
@@ -210,6 +227,191 @@
 }
 
 /*
+ * Prepare page for fast batched LRU putback via __putback_lru_fast()
+ *
+ * The fast path is available only for evictable pages with a single mapping;
+ * for those we can bypass the per-cpu pvec and get better performance.
+ * When mapcount > 1 we need try_to_munlock(), which can fail.
+ * When !page_evictable(), we need the full redo logic of putback_lru_page to
+ * avoid leaving an evictable page on the unevictable list.
+ *
+ * On success, @page is added to @pvec and @pgrescued is incremented if the
+ * page was previously unevictable. @page is also unlocked.
+ */
+static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
+		int *pgrescued)
+{
+	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON(!PageLocked(page));
+
+	if (page_mapcount(page) <= 1 && page_evictable(page)) {
+		pagevec_add(pvec, page);
+		if (TestClearPageUnevictable(page))
+			(*pgrescued)++;
+		unlock_page(page);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Putback multiple evictable pages to the LRU
+ *
+ * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
+ * the pages might have meanwhile become unevictable but that is OK.
+ */
+static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
+{
+	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
+	/*
+	 *__pagevec_lru_add() calls release_pages() so we don't call
+	 * put_page() explicitly
+	 */
+	__pagevec_lru_add(pvec);
+	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+}
+
+/*
+ * Munlock a batch of pages from the same zone
+ *
+ * The work is split to two main phases. First phase clears the Mlocked flag
+ * and attempts to isolate the pages, all under a single zone lru lock.
+ * The second phase finishes the munlock only for pages where isolation
+ * succeeded.
+ *
+ * Note that the pagevec may be modified during the process.
+ */
+static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
+{
+	int i;
+	int nr = pagevec_count(pvec);
+	int delta_munlocked = -nr;
+	struct pagevec pvec_putback;
+	int pgrescued = 0;
+
+	/* Phase 1: page isolation */
+	spin_lock_irq(&zone->lru_lock);
+	for (i = 0; i < nr; i++) {
+		struct page *page = pvec->pages[i];
+
+		if (TestClearPageMlocked(page)) {
+			struct lruvec *lruvec;
+			int lru;
+
+			if (PageLRU(page)) {
+				lruvec = mem_cgroup_page_lruvec(page, zone);
+				lru = page_lru(page);
+				/*
+				 * We already have pin from follow_page_mask()
+				 * so we can spare the get_page() here.
+				 */
+				ClearPageLRU(page);
+				del_page_from_lru_list(page, lruvec, lru);
+			} else {
+				__munlock_isolation_failed(page);
+				goto skip_munlock;
+			}
+
+		} else {
+skip_munlock:
+			/*
+			 * We won't be munlocking this page in the next phase
+			 * but we still need to release the follow_page_mask()
+			 * pin.
+			 */
+			pvec->pages[i] = NULL;
+			put_page(page);
+			delta_munlocked++;
+		}
+	}
+	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
+	spin_unlock_irq(&zone->lru_lock);
+
+	/* Phase 2: page munlock */
+	pagevec_init(&pvec_putback, 0);
+	for (i = 0; i < nr; i++) {
+		struct page *page = pvec->pages[i];
+
+		if (page) {
+			lock_page(page);
+			if (!__putback_lru_fast_prepare(page, &pvec_putback,
+					&pgrescued)) {
+				/*
+				 * Slow path. We don't want to lose the last
+				 * pin before unlock_page()
+				 */
+				get_page(page); /* for putback_lru_page() */
+				__munlock_isolated_page(page);
+				unlock_page(page);
+				put_page(page); /* from follow_page_mask() */
+			}
+		}
+	}
+
+	/*
+	 * Phase 3: page putback for pages that qualified for the fast path
+	 * This will also call put_page() to return pin from follow_page_mask()
+	 */
+	if (pagevec_count(&pvec_putback))
+		__putback_lru_fast(&pvec_putback, pgrescued);
+}
+
+/*
+ * Fill up pagevec for __munlock_pagevec using pte walk
+ *
+ * The function expects that the struct page corresponding to @start address is
+ * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
+ *
+ * The rest of @pvec is filled by subsequent pages within the same pmd and same
+ * zone, as long as the ptes are present and vm_normal_page() succeeds. These
+ * pages also get pinned.
+ *
+ * Returns the address of the next page that should be scanned. This equals
+ * @start + PAGE_SIZE when no page could be added by the pte walk.
+ */
+static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
+		struct vm_area_struct *vma, int zoneid,	unsigned long start,
+		unsigned long end)
+{
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	/*
+	 * Initialize pte walk starting at the already pinned page where we
+	 * are sure that there is a pte.
+	 */
+	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
+	end = min(end, pmd_addr_end(start, end));
+
+	/* The page next to the pinned page is the first we will try to get */
+	start += PAGE_SIZE;
+	while (start < end) {
+		struct page *page = NULL;
+		pte++;
+		if (pte_present(*pte))
+			page = vm_normal_page(vma, start, *pte);
+		/*
+		 * Break if the page could not be obtained or its
+		 * node+zone does not match.
+		 */
+		if (!page || page_zone_id(page) != zoneid)
+			break;
+
+		get_page(page);
+		/*
+		 * Increase the address that will be returned *before* the
+		 * eventual break due to pvec becoming full by adding the page
+		 */
+		start += PAGE_SIZE;
+		if (pagevec_add(pvec, page) == 0)
+			break;
+	}
+	pte_unmap_unlock(pte, ptl);
+	return start;
+}
+
+/*
  * munlock_vma_pages_range() - munlock all pages in the vma range.
  * @vma - vma containing range to be munlock()ed.
  * @start - start address in @vma of the range
@@ -233,9 +435,13 @@
 	vma->vm_flags &= ~VM_LOCKED;
 
 	while (start < end) {
-		struct page *page;
+		struct page *page = NULL;
 		unsigned int page_mask, page_increm;
+		struct pagevec pvec;
+		struct zone *zone;
+		int zoneid;
 
+		pagevec_init(&pvec, 0);
 		/*
 		 * Although FOLL_DUMP is intended for get_dump_page(),
 		 * it just so happens that its special treatment of the
@@ -244,21 +450,45 @@
 		 * has sneaked into the range, we won't oops here: great).
 		 */
 		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-					&page_mask);
+				&page_mask);
+
 		if (page && !IS_ERR(page)) {
-			lock_page(page);
-			lru_add_drain();
-			/*
-			 * Any THP page found by follow_page_mask() may have
-			 * gotten split before reaching munlock_vma_page(),
-			 * so we need to recompute the page_mask here.
-			 */
-			page_mask = munlock_vma_page(page);
-			unlock_page(page);
-			put_page(page);
+			if (PageTransHuge(page)) {
+				lock_page(page);
+				/*
+				 * Any THP page found by follow_page_mask() may
+				 * have gotten split before reaching
+				 * munlock_vma_page(), so we need to recompute
+				 * the page_mask here.
+				 */
+				page_mask = munlock_vma_page(page);
+				unlock_page(page);
+				put_page(page); /* follow_page_mask() */
+			} else {
+				/*
+				 * Non-huge pages are handled in batches via
+				 * pagevec. The pin from follow_page_mask()
+				 * prevents them from being collapsed into a THP.
+				 */
+				pagevec_add(&pvec, page);
+				zone = page_zone(page);
+				zoneid = page_zone_id(page);
+
+				/*
+				 * Try to fill the rest of pagevec using fast
+				 * pte walk. This will also update start to
+				 * the next page to process. Then munlock the
+				 * pagevec.
+				 */
+				start = __munlock_pagevec_fill(&pvec, vma,
+						zoneid, start, end);
+				__munlock_pagevec(&pvec, zone);
+				goto next;
+			}
 		}
 		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 		start += page_increm * PAGE_SIZE;
+next:
 		cond_resched();
 	}
 }
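
One subtlety in __munlock_pagevec_fill() above: pagevec_add() returns the
number of free slots remaining after the add, so a zero return means the
vector just became full and the walk must stop. A minimal sketch of that
fill-until-full idiom, where next_candidate_page() and process_batch() are
hypothetical stand-ins for the producer and the consumer that drains and
releases the pages:

	struct pagevec pvec;
	struct page *page;

	pagevec_init(&pvec, 0);
	while ((page = next_candidate_page()) != NULL) {	/* hypothetical */
		get_page(page);
		if (pagevec_add(&pvec, page) == 0) {
			process_batch(&pvec);	/* hypothetical: drain + release */
			pagevec_reinit(&pvec);
		}
	}
	if (pagevec_count(&pvec))
		process_batch(&pvec);
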
diff --git a/mm/mmap.c b/mm/mmap.c
index f9c97d1..9d54851 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1202,7 +1202,6 @@
 			unsigned long *populate)
 {
 	struct mm_struct * mm = current->mm;
-	struct inode *inode;
 	vm_flags_t vm_flags;
 
 	*populate = 0;
@@ -1265,9 +1264,9 @@
 			return -EAGAIN;
 	}
 
-	inode = file ? file_inode(file) : NULL;
-
 	if (file) {
+		struct inode *inode = file_inode(file);
+
 		switch (flags & MAP_TYPE) {
 		case MAP_SHARED:
 			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
@@ -1302,6 +1301,8 @@
 
 			if (!file->f_op || !file->f_op->mmap)
 				return -ENODEV;
+			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+				return -EINVAL;
 			break;
 
 		default:
@@ -1310,6 +1311,8 @@
 	} else {
 		switch (flags & MAP_TYPE) {
 		case MAP_SHARED:
+			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+				return -EINVAL;
 			/*
 			 * Ignore pgoff.
 			 */
@@ -1476,11 +1479,9 @@
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
-	int correct_wcount = 0;
 	int error;
 	struct rb_node **rb_link, *rb_parent;
 	unsigned long charged = 0;
-	struct inode *inode =  file ? file_inode(file) : NULL;
 
 	/* Check against address space limit. */
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
@@ -1544,16 +1545,11 @@
 	vma->vm_pgoff = pgoff;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
-	error = -EINVAL;	/* when rejecting VM_GROWSDOWN|VM_GROWSUP */
-
 	if (file) {
-		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
-			goto free_vma;
 		if (vm_flags & VM_DENYWRITE) {
 			error = deny_write_access(file);
 			if (error)
 				goto free_vma;
-			correct_wcount = 1;
 		}
 		vma->vm_file = get_file(file);
 		error = file->f_op->mmap(file, vma);
@@ -1570,11 +1566,8 @@
 		WARN_ON_ONCE(addr != vma->vm_start);
 
 		addr = vma->vm_start;
-		pgoff = vma->vm_pgoff;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
-		if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
-			goto free_vma;
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
@@ -1596,11 +1589,10 @@
 	}
 
 	vma_link(mm, vma, prev, rb_link, rb_parent);
-	file = vma->vm_file;
-
 	/* Once vma denies write, undo our temporary denial count */
-	if (correct_wcount)
-		atomic_inc(&inode->i_writecount);
+	if (vm_flags & VM_DENYWRITE)
+		allow_write_access(file);
+	file = vma->vm_file;
 out:
 	perf_event_mmap(vma);
 
@@ -1616,11 +1608,20 @@
 	if (file)
 		uprobe_mmap(vma);
 
+	/*
+	 * A new (or expanded) vma always gets soft dirty status.
+	 * Otherwise the user-space soft-dirty page tracker won't
+	 * be able to distinguish the case where a vma area was
+	 * unmapped and then a new one mapped in place (which must
+	 * be treated as a completely new data area).
+	 */
+	vma->vm_flags |= VM_SOFTDIRTY;
+
 	return addr;
 
 unmap_and_free_vma:
-	if (correct_wcount)
-		atomic_inc(&inode->i_writecount);
+	if (vm_flags & VM_DENYWRITE)
+		allow_write_access(file);
 	vma->vm_file = NULL;
 	fput(file);
 
@@ -2380,7 +2381,6 @@
 static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      unsigned long addr, int new_below)
 {
-	struct mempolicy *pol;
 	struct vm_area_struct *new;
 	int err = -ENOMEM;
 
@@ -2404,12 +2404,9 @@
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
-	pol = mpol_dup(vma_policy(vma));
-	if (IS_ERR(pol)) {
-		err = PTR_ERR(pol);
+	err = vma_dup_policy(vma, new);
+	if (err)
 		goto out_free_vma;
-	}
-	vma_set_policy(new, pol);
 
 	if (anon_vma_clone(new, vma))
 		goto out_free_mpol;
@@ -2437,7 +2434,7 @@
 		fput(new->vm_file);
 	unlink_anon_vmas(new);
  out_free_mpol:
-	mpol_put(pol);
+	mpol_put(vma_policy(new));
  out_free_vma:
 	kmem_cache_free(vm_area_cachep, new);
  out_err:
@@ -2663,6 +2660,7 @@
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED)
 		mm->locked_vm += (len >> PAGE_SHIFT);
+	vma->vm_flags |= VM_SOFTDIRTY;
 	return addr;
 }
 
@@ -2780,7 +2778,6 @@
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma, *prev;
 	struct rb_node **rb_link, *rb_parent;
-	struct mempolicy *pol;
 	bool faulted_in_anon_vma = true;
 
 	/*
@@ -2825,10 +2822,8 @@
 			new_vma->vm_start = addr;
 			new_vma->vm_end = addr + len;
 			new_vma->vm_pgoff = pgoff;
-			pol = mpol_dup(vma_policy(vma));
-			if (IS_ERR(pol))
+			if (vma_dup_policy(vma, new_vma))
 				goto out_free_vma;
-			vma_set_policy(new_vma, pol);
 			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
 			if (anon_vma_clone(new_vma, vma))
 				goto out_free_mempol;
@@ -2843,7 +2838,7 @@
 	return new_vma;
 
  out_free_mempol:
-	mpol_put(pol);
+	mpol_put(vma_policy(new_vma));
  out_free_vma:
 	kmem_cache_free(vm_area_cachep, new_vma);
 	return NULL;
@@ -2930,7 +2925,7 @@
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 
-	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 	vma->vm_ops = &special_mapping_vmops;
diff --git a/mm/mremap.c b/mm/mremap.c
index 0843feb..91b13d6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,6 +25,7 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
 
 #include "internal.h"
 
@@ -62,8 +63,10 @@
 		return NULL;
 
 	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
+	if (!pmd) {
+		pud_free(mm, pud);
 		return NULL;
+	}
 
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 
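
The mremap.c hunk closes a narrow leak in alloc_new_pmd(): pud_alloc() can
succeed and pmd_alloc() then fail under memory pressure, and the pud
allocated one step earlier was previously abandoned. The unwind pattern, as
the hunk applies it:

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd) {
		pud_free(mm, pud);	/* release the level allocated above */
		return NULL;
	}
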
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3f0c895..6c7b018 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,8 +36,11 @@
 #include <linux/pagevec.h>
 #include <linux/timer.h>
 #include <linux/sched/rt.h>
+#include <linux/mm_inline.h>
 #include <trace/events/writeback.h>
 
+#include "internal.h"
+
 /*
  * Sleep at most 200ms at a time in balance_dirty_pages().
  */
@@ -241,9 +244,6 @@
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
 
-	/* Subtract min_free_kbytes */
-	x -= min_t(unsigned long, x, min_free_kbytes >> (PAGE_SHIFT - 10));
-
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
@@ -585,6 +585,37 @@
 }
 
 /*
+ *                           setpoint - dirty 3
+ *        f(dirty) := 1.0 + (----------------)
+ *                           limit - setpoint
+ *
+ * It's a 3rd order polynomial subject to
+ *
+ * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
+ * (2) f(setpoint) = 1.0 => the balance point
+ * (3) f(limit)    = 0   => the hard limit
+ * (4) df/dx      <= 0	 => negative feedback control
+ * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
+ *     => fast response on large errors; small oscillation near setpoint
+ */
+static inline long long pos_ratio_polynom(unsigned long setpoint,
+					  unsigned long dirty,
+					  unsigned long limit)
+{
+	long long pos_ratio;
+	long x;
+
+	x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+		    limit - setpoint + 1);
+	pos_ratio = x;
+	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+
+	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
+}
+
+/*
  * Dirty position control.
  *
  * (o) global/bdi setpoints
@@ -682,26 +713,80 @@
 	/*
 	 * global setpoint
 	 *
-	 *                           setpoint - dirty 3
-	 *        f(dirty) := 1.0 + (----------------)
-	 *                           limit - setpoint
-	 *
-	 * it's a 3rd order polynomial that subjects to
-	 *
-	 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
-	 * (2) f(setpoint) = 1.0 => the balance point
-	 * (3) f(limit)    = 0   => the hard limit
-	 * (4) df/dx      <= 0	 => negative feedback control
-	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
-	 *     => fast response on large errors; small oscillation near setpoint
+	 * See comment for pos_ratio_polynom().
 	 */
 	setpoint = (freerun + limit) / 2;
-	x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
-		    limit - setpoint + 1);
-	pos_ratio = x;
-	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
-	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
-	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+	pos_ratio = pos_ratio_polynom(setpoint, dirty, limit);
+
+	/*
+	 * The strictlimit feature is a tool preventing mistrusted filesystems
+	 * from growing a large number of dirty pages before throttling. For
+	 * such filesystems balance_dirty_pages always checks bdi counters
+	 * against bdi limits, even if global "nr_dirty" is under "freerun".
+	 * This is especially important for fuse which sets bdi->max_ratio to
+	 * 1% by default. Without strictlimit feature, fuse writeback may
+	 * consume arbitrary amount of RAM because it is accounted in
+	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
+	 *
+	 * Here, in bdi_position_ratio(), we calculate pos_ratio based on
+	 * two values: bdi_dirty and bdi_thresh. Let's consider an example:
+	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
+	 * limits are set by default to 10% and 20% (background and throttle).
+	 * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
+	 * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is
+	 * about ~6K pages (as the average of background and throttle bdi
+	 * limits). The 3rd order polynomial will provide positive feedback if
+	 * bdi_dirty is under bdi_setpoint and vice versa.
+	 *
+	 * Note, that we cannot use global counters in these calculations
+	 * because we want to throttle process writing to a strictlimit BDI
+	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
+	 * in the example above).
+	 */
+	if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+		long long bdi_pos_ratio;
+		unsigned long bdi_bg_thresh;
+
+		if (bdi_dirty < 8)
+			return min_t(long long, pos_ratio * 2,
+				     2 << RATELIMIT_CALC_SHIFT);
+
+		if (bdi_dirty >= bdi_thresh)
+			return 0;
+
+		bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh);
+		bdi_setpoint = dirty_freerun_ceiling(bdi_thresh,
+						     bdi_bg_thresh);
+
+		if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh)
+			return 0;
+
+		bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty,
+						  bdi_thresh);
+
+		/*
+		 * Typically, for the strictlimit case, bdi_setpoint << setpoint
+		 * and pos_ratio >> bdi_pos_ratio. In other words, the global
+		 * state ("dirty") is not the limiting factor and we have to
+		 * make the decision based on bdi counters. But there is an
+		 * important case when global pos_ratio should get precedence:
+		 * global limits are exceeded (e.g. due to activities on other
+		 * BDIs) while given strictlimit BDI is below limit.
+		 *
+		 * "pos_ratio * bdi_pos_ratio" would work for the case above,
+		 * but it would look unnatural for the case of all
+		 * activity in the system coming from a single strictlimit BDI
+		 * with bdi->max_ratio == 100%.
+		 *
+		 * Note that min() below somewhat changes the dynamics of the
+		 * control system. Normally, pos_ratio value can be well over 3
+		 * (when globally we are at freerun and bdi is well below bdi
+		 * setpoint). Now the maximum pos_ratio in the same situation
+		 * is 2. We might want to tweak this if we observe the control
+		 * system is too slow to adapt.
+		 */
+		return min(pos_ratio, bdi_pos_ratio);
+	}
 
 	/*
 	 * We have computed basic pos_ratio above based on global situation. If
@@ -994,6 +1079,27 @@
 	 * keep that period small to reduce time lags).
 	 */
 	step = 0;
+
+	/*
+	 * For strictlimit case, calculations above were based on bdi counters
+	 * and limits (starting from pos_ratio = bdi_position_ratio() and up to
+	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
+	 * Hence, to calculate "step" properly, we have to use bdi_dirty as
+	 * "dirty" and bdi_setpoint as "setpoint".
+	 *
+	 * We ramp up dirty_ratelimit forcibly if bdi_dirty is low because
+	 * it's possible that bdi_thresh is close to zero due to inactivity
+	 * of backing device (see the implementation of bdi_dirty_limit()).
+	 */
+	if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+		dirty = bdi_dirty;
+		if (bdi_dirty < 8)
+			setpoint = bdi_dirty + 1;
+		else
+			setpoint = (bdi_thresh +
+				    bdi_dirty_limit(bdi, bg_thresh)) / 2;
+	}
+
 	if (dirty < setpoint) {
 		x = min(bdi->balanced_dirty_ratelimit,
 			 min(balanced_dirty_ratelimit, task_ratelimit));
@@ -1198,6 +1304,56 @@
 	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
 }
 
+static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
+				    unsigned long dirty_thresh,
+				    unsigned long background_thresh,
+				    unsigned long *bdi_dirty,
+				    unsigned long *bdi_thresh,
+				    unsigned long *bdi_bg_thresh)
+{
+	unsigned long bdi_reclaimable;
+
+	/*
+	 * bdi_thresh is not treated as a strict limiting factor the way
+	 * dirty_thresh is, for these reasons:
+	 * - in JBOD setup, bdi_thresh can fluctuate a lot
+	 * - in a system with HDD and USB key, the USB key may somehow
+	 *   go into state (bdi_dirty >> bdi_thresh) either because
+	 *   bdi_dirty starts high, or because bdi_thresh drops low.
+	 *   In this case we don't want to hard throttle the USB key
+	 *   dirtiers for 100 seconds until bdi_dirty drops under
+	 *   bdi_thresh. Instead the auxiliary bdi control line in
+	 *   bdi_position_ratio() will let the dirtier task progress
+	 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
+	 */
+	*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+
+	if (bdi_bg_thresh)
+		*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
+					 background_thresh,
+					 dirty_thresh);
+
+	/*
+	 * In order to avoid the stacked BDI deadlock we need
+	 * to ensure we accurately count the 'dirty' pages when
+	 * the threshold is low.
+	 *
+	 * Otherwise it would be possible to get thresh+n pages
+	 * reported dirty, even though there are thresh-m pages
+	 * actually dirty; with m+n sitting in the percpu
+	 * deltas.
+	 */
+	if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
+		bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+		*bdi_dirty = bdi_reclaimable +
+			bdi_stat_sum(bdi, BDI_WRITEBACK);
+	} else {
+		bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+		*bdi_dirty = bdi_reclaimable +
+			bdi_stat(bdi, BDI_WRITEBACK);
+	}
+}
+
 /*
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
@@ -1209,13 +1365,9 @@
 				unsigned long pages_dirtied)
 {
 	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
-	unsigned long bdi_reclaimable;
 	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
-	unsigned long bdi_dirty;
-	unsigned long freerun;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
-	unsigned long bdi_thresh;
 	long period;
 	long pause;
 	long max_pause;
@@ -1226,10 +1378,16 @@
 	unsigned long dirty_ratelimit;
 	unsigned long pos_ratio;
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
 	unsigned long start_time = jiffies;
 
 	for (;;) {
 		unsigned long now = jiffies;
+		unsigned long uninitialized_var(bdi_thresh);
+		unsigned long thresh;
+		unsigned long uninitialized_var(bdi_dirty);
+		unsigned long dirty;
+		unsigned long bg_thresh;
 
 		/*
 		 * Unstable writes are a feature of certain networked
@@ -1243,61 +1401,44 @@
 
 		global_dirty_limits(&background_thresh, &dirty_thresh);
 
+		if (unlikely(strictlimit)) {
+			bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
+					 &bdi_dirty, &bdi_thresh, &bg_thresh);
+
+			dirty = bdi_dirty;
+			thresh = bdi_thresh;
+		} else {
+			dirty = nr_dirty;
+			thresh = dirty_thresh;
+			bg_thresh = background_thresh;
+		}
+
 		/*
 		 * Throttle it only when the background writeback cannot
 		 * catch-up. This avoids (excessively) small writeouts
-		 * when the bdi limits are ramping up.
+		 * when the bdi limits are ramping up in case of !strictlimit.
+		 *
+		 * In strictlimit case make decision based on the bdi counters
+		 * and limits. Small writeouts when the bdi limits are ramping
+		 * up are the price we consciously pay for strictlimit-ing.
 		 */
-		freerun = dirty_freerun_ceiling(dirty_thresh,
-						background_thresh);
-		if (nr_dirty <= freerun) {
+		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
 			current->dirty_paused_when = now;
 			current->nr_dirtied = 0;
 			current->nr_dirtied_pause =
-				dirty_poll_interval(nr_dirty, dirty_thresh);
+				dirty_poll_interval(dirty, thresh);
 			break;
 		}
 
 		if (unlikely(!writeback_in_progress(bdi)))
 			bdi_start_background_writeback(bdi);
 
-		/*
-		 * bdi_thresh is not treated as some limiting factor as
-		 * dirty_thresh, due to reasons
-		 * - in JBOD setup, bdi_thresh can fluctuate a lot
-		 * - in a system with HDD and USB key, the USB key may somehow
-		 *   go into state (bdi_dirty >> bdi_thresh) either because
-		 *   bdi_dirty starts high, or because bdi_thresh drops low.
-		 *   In this case we don't want to hard throttle the USB key
-		 *   dirtiers for 100 seconds until bdi_dirty drops under
-		 *   bdi_thresh. Instead the auxiliary bdi control line in
-		 *   bdi_position_ratio() will let the dirtier task progress
-		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
-		 */
-		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
-
-		/*
-		 * In order to avoid the stacked BDI deadlock we need
-		 * to ensure we accurately count the 'dirty' pages when
-		 * the threshold is low.
-		 *
-		 * Otherwise it would be possible to get thresh+n pages
-		 * reported dirty, even though there are thresh-m pages
-		 * actually dirty; with m+n sitting in the percpu
-		 * deltas.
-		 */
-		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
-			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-			bdi_dirty = bdi_reclaimable +
-				    bdi_stat_sum(bdi, BDI_WRITEBACK);
-		} else {
-			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-			bdi_dirty = bdi_reclaimable +
-				    bdi_stat(bdi, BDI_WRITEBACK);
-		}
+		if (!strictlimit)
+			bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
+					 &bdi_dirty, &bdi_thresh, NULL);
 
 		dirty_exceeded = (bdi_dirty > bdi_thresh) &&
-				  (nr_dirty > dirty_thresh);
+				 ((nr_dirty > dirty_thresh) || strictlimit);
 		if (dirty_exceeded && !bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c2b59db..0ee638f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/mm_inline.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
@@ -488,8 +489,10 @@
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we set ->_mapcount -2.
- * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE.
+ * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
+ * serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -527,8 +530,9 @@
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with _mapcount -2. Page's
- * order is recorded in page_private(page) field.
+ * free pages of length of (1 << order) and marked with _mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
+ * field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other.  That is, if we allocate a small block, and both were
  * free, the remainder of the region must be split into blocks.
@@ -647,7 +651,6 @@
 	int to_free = count;
 
 	spin_lock(&zone->lock);
-	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	while (to_free) {
@@ -696,7 +699,6 @@
 				int migratetype)
 {
 	spin_lock(&zone->lock);
-	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	__free_one_page(page, zone, order, migratetype);
@@ -721,7 +723,8 @@
 		return false;
 
 	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+		debug_check_no_locks_freed(page_address(page),
+					   PAGE_SIZE << order);
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
@@ -750,19 +753,19 @@
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
+	struct page *p = page;
 	unsigned int loop;
 
-	prefetchw(page);
-	for (loop = 0; loop < nr_pages; loop++) {
-		struct page *p = &page[loop];
-
-		if (loop + 1 < nr_pages)
-			prefetchw(p + 1);
+	prefetchw(p);
+	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+		prefetchw(p + 1);
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
 	}
+	__ClearPageReserved(p);
+	set_page_count(p, 0);
 
-	page_zone(page)->managed_pages += 1 << order;
+	page_zone(page)->managed_pages += nr_pages;
 	set_page_refcounted(page);
 	__free_pages(page, order);
 }
@@ -885,7 +888,7 @@
 						int migratetype)
 {
 	unsigned int current_order;
-	struct free_area * area;
+	struct free_area *area;
 	struct page *page;
 
 	/* Find a page of the appropriate size in the preferred list */
@@ -1007,14 +1010,60 @@
 	}
 }
 
+/*
+ * If breaking a large block of pages, move all free pages to the preferred
+ * allocation list. If falling back for a reclaimable kernel allocation, be
+ * more aggressive about taking ownership of free pages.
+ *
+ * On the other hand, never change migration type of MIGRATE_CMA pageblocks
+ * nor move CMA pages to different free lists. We don't want unmovable pages
+ * to be allocated from MIGRATE_CMA areas.
+ *
+ * Returns the new migratetype of the pageblock (or the same old migratetype
+ * if it was unchanged).
+ */
+static int try_to_steal_freepages(struct zone *zone, struct page *page,
+				  int start_type, int fallback_type)
+{
+	int current_order = page_order(page);
+
+	if (is_migrate_cma(fallback_type))
+		return fallback_type;
+
+	/* Take ownership for orders >= pageblock_order */
+	if (current_order >= pageblock_order) {
+		change_pageblock_range(page, current_order, start_type);
+		return start_type;
+	}
+
+	if (current_order >= pageblock_order / 2 ||
+	    start_type == MIGRATE_RECLAIMABLE ||
+	    page_group_by_mobility_disabled) {
+		int pages;
+
+		pages = move_freepages_block(zone, page, start_type);
+
+		/* Claim the whole block if over half of it is free */
+		if (pages >= (1 << (pageblock_order-1)) ||
+				page_group_by_mobility_disabled) {
+
+			set_pageblock_migratetype(page, start_type);
+			return start_type;
+		}
+
+	}
+
+	return fallback_type;
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
-	struct free_area * area;
+	struct free_area *area;
 	int current_order;
 	struct page *page;
-	int migratetype, i;
+	int migratetype, new_type, i;
 
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
@@ -1034,51 +1083,29 @@
 					struct page, lru);
 			area->nr_free--;
 
-			/*
-			 * If breaking a large block of pages, move all free
-			 * pages to the preferred allocation list. If falling
-			 * back for a reclaimable kernel allocation, be more
-			 * aggressive about taking ownership of free pages
-			 *
-			 * On the other hand, never change migration
-			 * type of MIGRATE_CMA pageblocks nor move CMA
-			 * pages on different free lists. We don't
-			 * want unmovable pages to be allocated from
-			 * MIGRATE_CMA areas.
-			 */
-			if (!is_migrate_cma(migratetype) &&
-			    (current_order >= pageblock_order / 2 ||
-			     start_migratetype == MIGRATE_RECLAIMABLE ||
-			     page_group_by_mobility_disabled)) {
-				int pages;
-				pages = move_freepages_block(zone, page,
-								start_migratetype);
-
-				/* Claim the whole block if over half of it is free */
-				if (pages >= (1 << (pageblock_order-1)) ||
-						page_group_by_mobility_disabled)
-					set_pageblock_migratetype(page,
-								start_migratetype);
-
-				migratetype = start_migratetype;
-			}
+			new_type = try_to_steal_freepages(zone, page,
+							  start_migratetype,
+							  migratetype);
 
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order &&
-			    !is_migrate_cma(migratetype))
-				change_pageblock_range(page, current_order,
-							start_migratetype);
-
+			/*
+			 * Borrow the excess buddy pages as well, irrespective
+			 * of whether we stole freepages, or took ownership of
+			 * the pageblock or not.
+			 *
+			 * Exception: When borrowing from MIGRATE_CMA, release
+			 * the excess buddy pages to CMA itself.
+			 */
 			expand(zone, page, order, current_order, area,
 			       is_migrate_cma(migratetype)
 			     ? migratetype : start_migratetype);
 
-			trace_mm_page_alloc_extfrag(page, order, current_order,
-				start_migratetype, migratetype);
+			trace_mm_page_alloc_extfrag(page, order,
+				current_order, start_migratetype, migratetype,
+				new_type == start_migratetype);
 
 			return page;
 		}
@@ -1281,7 +1308,7 @@
 	int order, t;
 	struct list_head *curr;
 
-	if (!zone->spanned_pages)
+	if (zone_is_empty(zone))
 		return;
 
 	spin_lock_irqsave(&zone->lock, flags);
@@ -1526,6 +1553,7 @@
 					  get_pageblock_migratetype(page));
 	}
 
+	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
@@ -1792,6 +1820,11 @@
 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 }
 
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+	return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+}
+
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
@@ -1829,6 +1862,11 @@
 {
 }
 
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+	return true;
+}
+
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return true;
@@ -1860,16 +1898,41 @@
 zonelist_scan:
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
-	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
+		unsigned long mark;
+
 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 		if ((alloc_flags & ALLOC_CPUSET) &&
 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
 				continue;
+		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
+		if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
+			goto try_this_zone;
+		/*
+		 * Distribute pages in proportion to the individual
+		 * zone size to ensure fair page aging.  The zone a
+		 * page was allocated in should have no effect on the
+		 * time the page has in memory before being reclaimed.
+		 *
+		 * When zone_reclaim_mode is enabled, try to stay in
+		 * local zones in the fastpath.  If that fails, the
+		 * slowpath is entered, which will do another pass
+		 * starting with the local zones, but ultimately fall
+		 * back to remote zones that do not partake in the
+		 * fairness round-robin cycle of this zonelist.
+		 */
+		if (alloc_flags & ALLOC_WMARK_LOW) {
+			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+				continue;
+			if (zone_reclaim_mode &&
+			    !zone_local(preferred_zone, zone))
+				continue;
+		}
 		/*
 		 * When allocating a page cache page for writing, we
 		 * want to get it from a zone that is within its dirty
@@ -1900,16 +1963,11 @@
 		    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
 			goto this_zone_full;
 
-		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
-		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
-			unsigned long mark;
+		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+		if (!zone_watermark_ok(zone, order, mark,
+				       classzone_idx, alloc_flags)) {
 			int ret;
 
-			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
-			if (zone_watermark_ok(zone, order, mark,
-				    classzone_idx, alloc_flags))
-				goto try_this_zone;
-
 			if (IS_ENABLED(CONFIG_NUMA) &&
 					!did_zlc_setup && nr_online_nodes > 1) {
 				/*
@@ -2321,16 +2379,30 @@
 	return page;
 }
 
-static inline
-void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
-						enum zone_type high_zoneidx,
-						enum zone_type classzone_idx)
+static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
+			     struct zonelist *zonelist,
+			     enum zone_type high_zoneidx,
+			     struct zone *preferred_zone)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
-	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-		wakeup_kswapd(zone, order, classzone_idx);
+	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+		if (!(gfp_mask & __GFP_NO_KSWAPD))
+			wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+		/*
+		 * Only reset the batches of zones that were actually
+		 * considered in the fast path, we don't want to
+		 * thrash fairness information for zones that are not
+		 * actually part of this zonelist's round-robin cycle.
+		 */
+		if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+			continue;
+		mod_zone_page_state(zone, NR_ALLOC_BATCH,
+				    high_wmark_pages(zone) -
+				    low_wmark_pages(zone) -
+				    zone_page_state(zone, NR_ALLOC_BATCH));
+	}
 }
 
 static inline int
@@ -2426,9 +2498,8 @@
 		goto nopage;
 
 restart:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
+	prepare_slowpath(gfp_mask, order, zonelist,
+			 high_zoneidx, preferred_zone);
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -3095,7 +3166,7 @@
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			zone->pages_scanned,
-			(zone->all_unreclaimable ? "yes" : "no")
+			(!zone_reclaimable(zone) ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -3104,7 +3175,7 @@
 	}
 
 	for_each_populated_zone(zone) {
- 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -3416,11 +3487,11 @@
 static int default_zonelist_order(void)
 {
 	int nid, zone_type;
-	unsigned long low_kmem_size,total_size;
+	unsigned long low_kmem_size, total_size;
 	struct zone *z;
 	int average_size;
 	/*
-         * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
+	 * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
 	 * If they are really small and used heavily, the system can fall
 	 * into OOM very easily.
 	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
@@ -3452,9 +3523,9 @@
 		return ZONELIST_ORDER_NODE;
 	/*
 	 * look into each node's config.
-  	 * If there is a node whose DMA/DMA32 memory is very big area on
- 	 * local memory, NODE_ORDER may be suitable.
-         */
+	 * If there is a node whose DMA/DMA32 memory is very big area on
+	 * local memory, NODE_ORDER may be suitable.
+	 */
 	average_size = total_size /
 				(nodes_weight(node_states[N_MEMORY]) + 1);
 	for_each_online_node(nid) {
@@ -4180,7 +4251,7 @@
 	if (!zone->wait_table)
 		return -ENOMEM;
 
-	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
+	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 		init_waitqueue_head(zone->wait_table + i);
 
 	return 0;
@@ -4237,7 +4308,7 @@
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	unsigned long start_pfn, end_pfn;
-	int i, nid;
+	int nid;
 	/*
 	 * NOTE: The following SMP-unsafe globals are only used early in boot
 	 * when the kernel is running single-threaded.
@@ -4248,15 +4319,14 @@
 	if (last_start_pfn <= pfn && pfn < last_end_pfn)
 		return last_nid;
 
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-		if (start_pfn <= pfn && pfn < end_pfn) {
-			last_start_pfn = start_pfn;
-			last_end_pfn = end_pfn;
-			last_nid = nid;
-			return nid;
-		}
-	/* This is a memory hole */
-	return -1;
+	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
+	if (nid != -1) {
+		last_start_pfn = start_pfn;
+		last_end_pfn = end_pfn;
+		last_nid = nid;
+	}
+
+	return nid;
 }
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
@@ -4586,7 +4656,7 @@
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
 {
 	unsigned int order;
 
@@ -4614,7 +4684,7 @@
  * include/linux/pageblock-flags.h for the values of pageblock_order based on
  * the kernel config
  */
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
 {
 }
 
@@ -4728,8 +4798,11 @@
 		spin_lock_init(&zone->lru_lock);
 		zone_seqlock_init(zone);
 		zone->zone_pgdat = pgdat;
-
 		zone_pcp_init(zone);
+
+		/* For bootup, initialized properly in watermark setup */
+		mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
+
 		lruvec_init(&zone->lruvec);
 		if (!size)
 			continue;
@@ -4930,7 +5003,7 @@
 		if (pages)
 			node_set_state(nid, N_MEMORY);
 	}
-  	return totalpages;
+	return totalpages;
 }
 
 /*
@@ -5047,7 +5120,7 @@
 			/*
 			 * Some kernelcore has been met, update counts and
 			 * break if the kernelcore for this node has been
-			 * satisified
+			 * satisfied
 			 */
 			required_kernelcore -= min(required_kernelcore,
 								size_pages);
@@ -5061,7 +5134,7 @@
 	 * If there is still required_kernelcore, we do another pass with one
 	 * less node in the count. This will push zone_movable_pfn[nid] further
 	 * along on the nodes that still have memory until kernelcore is
-	 * satisified
+	 * satisfied
 	 */
 	usable_nodes--;
 	if (usable_nodes && required_kernelcore > usable_nodes)
@@ -5286,8 +5359,10 @@
 	 * 3) .rodata.* may be embedded into .text or .data sections.
 	 */
 #define adj_init_size(start, end, size, pos, adj) \
-	if (start <= pos && pos < end && size > adj) \
-		size -= adj;
+	do { \
+		if (start <= pos && pos < end && size > adj) \
+			size -= adj; \
+	} while (0)
 
 	adj_init_size(__init_begin, __init_end, init_data_size,
 		     _sinittext, init_code_size);
@@ -5361,7 +5436,7 @@
 		 * This is only okay since the processor is dead and cannot
 		 * race with what we are doing.
 		 */
-		refresh_cpu_vm_stats(cpu);
+		cpu_vm_stats_fold(cpu);
 	}
 	return NOTIFY_OK;
 }
@@ -5498,6 +5573,11 @@
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
+		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
+				      high_wmark_pages(zone) -
+				      low_wmark_pages(zone) -
+				      zone_page_state(zone, NR_ALLOC_BATCH));
+
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5570,7 +5650,7 @@
  * we want it large (64MB max).  But it is not linear, because network
  * bandwidth does not increase linearly with machine size.  We use
  *
- * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
  *
  * which yields
@@ -5614,11 +5694,11 @@
 module_init(init_per_zone_wmark_min)
 
 /*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *	that we can call two helper functions whenever min_free_kbytes
  *	changes.
  */
-int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
@@ -5682,8 +5762,8 @@
 
 /*
  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
- * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
- * can have before it gets flushed back to buddy allocator.
+ * cpu.  It is the fraction of total pages in each zone that a hot per cpu
+ * pagelist can have before it gets flushed back to buddy allocator.
  */
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
@@ -5745,9 +5825,10 @@
 	if (!numentries) {
 		/* round applicable memory size up to nearest megabyte */
 		numentries = nr_kernel_pages;
-		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
-		numentries >>= 20 - PAGE_SHIFT;
-		numentries <<= 20 - PAGE_SHIFT;
+
+		/* It isn't necessary when PAGE_SIZE >= 1MB */
+		if (PAGE_SHIFT < 20)
+			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
 
 		/* limit to 1 bucket per 2^scale bytes of low memory */
 		if (scale > PAGE_SHIFT)
@@ -5900,7 +5981,7 @@
  * This function checks whether pageblock includes unmovable pages or not.
  * If @count is not zero, it is okay to include less @count unmovable pages
  *
- * PageLRU check wihtout isolation or lru_lock could race so that
+ * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
  * expect this function should be exact.
  */
@@ -5928,6 +6009,17 @@
 			continue;
 
 		page = pfn_to_page(check);
+
+		/*
+		 * Hugepages are not in LRU lists, but they're movable.
+		 * We need not scan over tail pages because we don't
+		 * handle each tail page individually in migration.
+		 */
+		if (PageHuge(page)) {
+			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+			continue;
+		}
+
 		/*
 		 * We can't use page_count without pin a page
 		 * because another CPU can free compound page.
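
To make the fair-aging scheme concrete: each zone's round-robin budget is
high_wmark - low_wmark pages. The three fragments below are taken from the
hunks above; the watermark numbers are hypothetical (high_wmark = 1200,
low_wmark = 900, so a 300-page budget):

	/* fast path: spend 1 << order pages of the budget per allocation */
	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));

	/* fast path: skip a zone whose budget is exhausted */
	if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
		continue;

	/* slowpath (and watermark setup): top the budget back up to 300 */
	mod_zone_page_state(zone, NR_ALLOC_BATCH,
			    high_wmark_pages(zone) - low_wmark_pages(zone) -
			    zone_page_state(zone, NR_ALLOC_BATCH));
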
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 0cee10f..d1473b2 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -252,6 +253,19 @@
 {
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 
+	/*
+	 * TODO: allocate a destination hugepage from the nearest neighbor node,
+	 * in accordance with the memory policy of the user process if possible.
+	 * For now, as a simple work-around, we use the next node as the
+	 * destination.
+	 */
+	if (PageHuge(page)) {
+		nodemask_t src = nodemask_of_node(page_to_nid(page));
+		nodemask_t dst;
+		nodes_complement(dst, src);
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    next_node(page_to_nid(page), dst));
+	}
+
 	if (PageHighMem(page))
 		gfp_mask |= __GFP_HIGHMEM;
 
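
The nodemask arithmetic in the hugepage branch deserves a note:
nodemask_of_node() yields a mask with only the source node set,
nodes_complement() inverts it, and next_node() returns the first node id
greater than the source in that mask (or MAX_NUMNODES if there is none). In
isolation:

	int nid = page_to_nid(page);
	nodemask_t src = nodemask_of_node(nid);	/* only the source node set */
	nodemask_t dst;
	int target;

	nodes_complement(dst, src);	/* every node id except the source */
	target = next_node(nid, dst);	/* usually just nid + 1 */

As the TODO notes, this is a stopgap: the chosen id is simply the next one,
with no guarantee it is nearby, online, or consistent with the task's
mempolicy.
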
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index e1a6e4f..3929a40 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -10,6 +10,30 @@
 #include <asm/tlb.h>
 #include <asm-generic/pgtable.h>
 
+/*
+ * If a p?d_bad entry is found while walking page tables, report
+ * the error, before resetting entry to p?d_none.  Usually (but
+ * very seldom) called out from the p?d_none_or_clear_bad macros.
+ */
+
+void pgd_clear_bad(pgd_t *pgd)
+{
+	pgd_ERROR(*pgd);
+	pgd_clear(pgd);
+}
+
+void pud_clear_bad(pud_t *pud)
+{
+	pud_ERROR(*pud);
+	pud_clear(pud);
+}
+
+void pmd_clear_bad(pmd_t *pmd)
+{
+	pmd_ERROR(*pmd);
+	pmd_clear(pmd);
+}
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
  * Only sets the access flags (dirty, accessed), as well as write 
diff --git a/mm/readahead.c b/mm/readahead.c
index 829a77c..e4ed041 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -371,10 +371,10 @@
 	size = count_history_pages(mapping, ra, offset, max);
 
 	/*
-	 * no history pages:
+	 * not enough history pages:
 	 * it could be a random read
 	 */
-	if (!size)
+	if (size <= req_size)
 		return 0;
 
 	/*
@@ -385,8 +385,8 @@
 		size *= 2;
 
 	ra->start = offset;
-	ra->size = get_init_ra_size(size + req_size, max);
-	ra->async_size = ra->size;
+	ra->size = min(size + req_size, max);
+	ra->async_size = 1;
 
 	return 1;
 }
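
A worked example of the new sizing, with hypothetical numbers and ignoring
the doubling branch elided between the hunks: let max = 128 pages,
req_size = 16, and suppose count_history_pages() finds size = 64
consecutively cached pages.

	/* 64 > 16: the history is more convincing than the request size */
	ra->size = min(size + req_size, max);	/* min(80, 128) = 80 pages */
	ra->async_size = 1;	/* trigger the next readahead near the end */

Previously, any nonzero history passed the test, the window was inflated
through get_init_ra_size(), and the entire window counted as asynchronous.
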
diff --git a/mm/shmem.c b/mm/shmem.c
index 5261498..8297623 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1205,7 +1205,7 @@
 						gfp & GFP_RECLAIM_MASK);
 		if (error)
 			goto decused;
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 							gfp, NULL);
@@ -2819,6 +2819,10 @@
 {
 	int error;
 
+	/* If rootfs called this, don't re-init */
+	if (shmem_inode_cachep)
+		return 0;
+
 	error = bdi_init(&shmem_backing_dev_info);
 	if (error)
 		goto out4;
diff --git a/mm/slub.c b/mm/slub.c
index e3ba1f2..51df827 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4420,7 +4420,7 @@
 	unsigned long order;
 	int err;
 
-	err = strict_strtoul(buf, 10, &order);
+	err = kstrtoul(buf, 10, &order);
 	if (err)
 		return err;
 
@@ -4448,7 +4448,7 @@
 	unsigned long min;
 	int err;
 
-	err = strict_strtoul(buf, 10, &min);
+	err = kstrtoul(buf, 10, &min);
 	if (err)
 		return err;
 
@@ -4468,7 +4468,7 @@
 	unsigned long objects;
 	int err;
 
-	err = strict_strtoul(buf, 10, &objects);
+	err = kstrtoul(buf, 10, &objects);
 	if (err)
 		return err;
 	if (objects && !kmem_cache_has_cpu_partial(s))
@@ -4784,7 +4784,7 @@
 	unsigned long ratio;
 	int err;
 
-	err = strict_strtoul(buf, 10, &ratio);
+	err = kstrtoul(buf, 10, &ratio);
 	if (err)
 		return err;
 
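
The slub.c changes are mechanical: strict_strtoul() was a deprecated wrapper
for what became kstrtoul(), and both share the (buf, base, unsigned long *)
signature with a 0-or-negative-errno return, so each call site converts
one-for-one:

	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);	/* -EINVAL or -ERANGE on bad input */
	if (err)
		return err;
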
diff --git a/mm/sparse.c b/mm/sparse.c
index 308d5033..4ac1d7e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -339,13 +339,14 @@
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+static void __init sparse_early_usemaps_alloc_node(void *data,
 				 unsigned long pnum_begin,
 				 unsigned long pnum_end,
 				 unsigned long usemap_count, int nodeid)
 {
 	void *usemap;
 	unsigned long pnum;
+	unsigned long **usemap_map = (unsigned long **)data;
 	int size = usemap_size();
 
 	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
@@ -430,11 +431,12 @@
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+static void __init sparse_early_mem_maps_alloc_node(void *data,
 				 unsigned long pnum_begin,
 				 unsigned long pnum_end,
 				 unsigned long map_count, int nodeid)
 {
+	struct page **map_map = (struct page **)data;
 	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
 					 map_count, nodeid);
 }
@@ -460,88 +462,18 @@
 {
 }
 
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
+/**
+ *  alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
+ *  @alloc_func: function to allocate usemap or memmap for the given range
+ *  @data: usemap_map for pageblock flags or map_map for vmemmap
  */
-void __init sparse_init(void)
+static void __init alloc_usemap_and_memmap(void (*alloc_func)
+					(void *, unsigned long, unsigned long,
+					unsigned long, int), void *data)
 {
 	unsigned long pnum;
-	struct page *map;
-	unsigned long *usemap;
-	unsigned long **usemap_map;
-	int size;
+	unsigned long map_count;
 	int nodeid_begin = 0;
 	unsigned long pnum_begin = 0;
-	unsigned long usemap_count;
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	unsigned long map_count;
-	int size2;
-	struct page **map_map;
-#endif
-
-	/* see include/linux/mmzone.h 'struct mem_section' definition */
-	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
-
-	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
-	set_pageblock_order();
-
-	/*
-	 * map is using big page (aka 2M in x86 64 bit)
-	 * usemap is less one page (aka 24 bytes)
-	 * so alloc 2M (with 2M align) and 24 bytes in turn will
-	 * make next 2M slip to one more 2M later.
-	 * then in big system, the memory will have a lot of holes...
-	 * here try to allocate 2M pages continuously.
-	 *
-	 * powerpc need to call sparse_init_one_section right after each
-	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
-	 */
-	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
-	usemap_map = alloc_bootmem(size);
-	if (!usemap_map)
-		panic("can not allocate usemap_map\n");
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		struct mem_section *ms;
-
-		if (!present_section_nr(pnum))
-			continue;
-		ms = __nr_to_section(pnum);
-		nodeid_begin = sparse_early_nid(ms);
-		pnum_begin = pnum;
-		break;
-	}
-	usemap_count = 1;
-	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
-		struct mem_section *ms;
-		int nodeid;
-
-		if (!present_section_nr(pnum))
-			continue;
-		ms = __nr_to_section(pnum);
-		nodeid = sparse_early_nid(ms);
-		if (nodeid == nodeid_begin) {
-			usemap_count++;
-			continue;
-		}
-		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
-		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
-						 usemap_count, nodeid_begin);
-		/* new start, update count etc*/
-		nodeid_begin = nodeid;
-		pnum_begin = pnum;
-		usemap_count = 1;
-	}
-	/* ok, last chunk */
-	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
-					 usemap_count, nodeid_begin);
-
-#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
-	map_map = alloc_bootmem(size2);
-	if (!map_map)
-		panic("can not allocate map_map\n");
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
 		struct mem_section *ms;
@@ -567,16 +499,65 @@
 			continue;
 		}
 		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
-		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
-						 map_count, nodeid_begin);
+		alloc_func(data, pnum_begin, pnum,
+						map_count, nodeid_begin);
 		/* new start, update count etc*/
 		nodeid_begin = nodeid;
 		pnum_begin = pnum;
 		map_count = 1;
 	}
 	/* ok, last chunk */
-	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
-					 map_count, nodeid_begin);
+	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
+						map_count, nodeid_begin);
+}
+
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+	unsigned long *usemap;
+	unsigned long **usemap_map;
+	int size;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	int size2;
+	struct page **map_map;
+#endif
+
+	/* see include/linux/mmzone.h 'struct mem_section' definition */
+	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
+
+	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+	set_pageblock_order();
+
+	/*
+	 * map uses big pages (aka 2M on x86 64 bit) while
+	 * usemap uses less than one page (aka 24 bytes),
+	 * so allocating 2M (with 2M alignment) and 24 bytes in turn will
+	 * make the next 2M slip to one more 2M later.
+	 * On a big system the memory will then have a lot of holes...
+	 * Here we try to allocate the 2M pages continuously.
+	 *
+	 * powerpc needs to call sparse_init_one_section right after each
+	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
+	 */
+	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+	usemap_map = alloc_bootmem(size);
+	if (!usemap_map)
+		panic("can not allocate usemap_map\n");
+	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
+							(void *)usemap_map);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+	map_map = alloc_bootmem(size2);
+	if (!map_map)
+		panic("can not allocate map_map\n");
+	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
+							(void *)map_map);
 #endif
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
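
The refactor above exists to deduplicate two nearly identical loops: walk the
present sections, group them into contiguous per-node runs, and call an
allocator on each run. Only the allocation step differed between the usemap
and mem_map variants, so it becomes the alloc_func callback. A minimal
userspace sketch of the pattern follows; the names and the simplifying
assumption that every section is present are illustrative, not the kernel API.

#include <stdio.h>

/* Walk items grouped by node and hand each contiguous run to alloc_func().
 * The loop is written once; the per-type allocation (usemap vs. mem_map)
 * is supplied by the caller, as in alloc_usemap_and_memmap(). */
static void for_each_node_run(const int *node_of, int n,
			      void (*alloc_func)(void *, int, int, int),
			      void *data)
{
	int begin = 0, node = node_of[0], i;

	for (i = 1; i < n; i++) {
		if (node_of[i] == node)
			continue;
		alloc_func(data, begin, i, node);	/* run [begin, i) */
		begin = i;
		node = node_of[i];
	}
	alloc_func(data, begin, n, node);		/* the last run */
}

static void print_run(void *data, int begin, int end, int node)
{
	printf("%s: sections [%d,%d) on node %d\n",
	       (const char *)data, begin, end, node);
}

int main(void)
{
	int node_of[] = { 0, 0, 0, 1, 1, 2 };

	for_each_node_run(node_of, 6, print_run, "usemap");
	return 0;
}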
diff --git a/mm/swap.c b/mm/swap.c
index 62b78a6..c899502 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/hugetlb.h>
 
 #include "internal.h"
 
@@ -81,6 +82,19 @@
 
 static void put_compound_page(struct page *page)
 {
+	/*
+	 * hugetlbfs pages cannot be split from under us.  If this is a
+	 * hugetlbfs page, check refcount on head page and release the page if
+	 * the refcount becomes zero.
+	 */
+	if (PageHuge(page)) {
+		page = compound_head(page);
+		if (put_page_testzero(page))
+			__put_compound_page(page);
+
+		return;
+	}
+
 	if (unlikely(PageTail(page))) {
 		/* __split_huge_page_refcount can run under us */
 		struct page *page_head = compound_trans_head(page);
@@ -184,38 +198,51 @@
 	 * proper PT lock that already serializes against
 	 * split_huge_page().
 	 */
-	unsigned long flags;
 	bool got = false;
-	struct page *page_head = compound_trans_head(page);
+	struct page *page_head;
 
-	if (likely(page != page_head && get_page_unless_zero(page_head))) {
+	/*
+	 * If this is a hugetlbfs page, it cannot be split from under us.  Simply
+	 * increment refcount for the head page.
+	 */
+	if (PageHuge(page)) {
+		page_head = compound_head(page);
+		atomic_inc(&page_head->_count);
+		got = true;
+	} else {
+		unsigned long flags;
 
-		/* Ref to put_compound_page() comment. */
-		if (PageSlab(page_head)) {
+		page_head = compound_trans_head(page);
+		if (likely(page != page_head &&
+					get_page_unless_zero(page_head))) {
+
+			/* Ref to put_compound_page() comment. */
+			if (PageSlab(page_head)) {
+				if (likely(PageTail(page))) {
+					__get_page_tail_foll(page, false);
+					return true;
+				} else {
+					put_page(page_head);
+					return false;
+				}
+			}
+
+			/*
+			 * page_head wasn't a dangling pointer but it
+			 * may not be a head page anymore by the time
+			 * we obtain the lock. That is ok as long as it
+			 * can't be freed from under us.
+			 */
+			flags = compound_lock_irqsave(page_head);
+			/* here __split_huge_page_refcount won't run anymore */
 			if (likely(PageTail(page))) {
 				__get_page_tail_foll(page, false);
-				return true;
-			} else {
-				put_page(page_head);
-				return false;
+				got = true;
 			}
+			compound_unlock_irqrestore(page_head, flags);
+			if (unlikely(!got))
+				put_page(page_head);
 		}
-
-		/*
-		 * page_head wasn't a dangling pointer but it
-		 * may not be a head page anymore by the time
-		 * we obtain the lock. That is ok as long as it
-		 * can't be freed from under us.
-		 */
-		flags = compound_lock_irqsave(page_head);
-		/* here __split_huge_page_refcount won't run anymore */
-		if (likely(PageTail(page))) {
-			__get_page_tail_foll(page, false);
-			got = true;
-		}
-		compound_unlock_irqrestore(page_head, flags);
-		if (unlikely(!got))
-			put_page(page_head);
 	}
 	return got;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f24ab0d..e6f15f8 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -122,7 +122,7 @@
 {
 	int error;
 
-	error = radix_tree_preload(gfp_mask);
+	error = radix_tree_maybe_preload(gfp_mask);
 	if (!error) {
 		error = __add_to_swap_cache(page, entry);
 		radix_tree_preload_end();
@@ -328,7 +328,7 @@
 		/*
 		 * call radix_tree_preload() while we can wait.
 		 */
-		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
 		if (err)
 			break;
 
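
On the radix_tree_maybe_preload() switch: swap-cache callers may pass gfp
masks that forbid blocking, and preloading per-cpu nodes cannot help an
atomic allocation. The helper's contract is roughly the sketch below; this is
a paraphrase of lib/radix-tree.c, not a verbatim copy.

/*
 * Preload only when the gfp mask allows sleeping; otherwise just disable
 * preemption so the caller can still pair this with
 * radix_tree_preload_end().
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_WAIT)
		return radix_tree_preload(gfp_mask);
	/* Preloading cannot help an atomic allocation; skip it. */
	preempt_disable();
	return 0;
}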
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6cf2e60..3963fc2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -175,15 +175,297 @@
 	}
 }
 
-static int wait_for_discard(void *word)
-{
-	schedule();
-	return 0;
-}
-
 #define SWAPFILE_CLUSTER	256
 #define LATENCY_LIMIT		256
 
+static inline void cluster_set_flag(struct swap_cluster_info *info,
+	unsigned int flag)
+{
+	info->flags = flag;
+}
+
+static inline unsigned int cluster_count(struct swap_cluster_info *info)
+{
+	return info->data;
+}
+
+static inline void cluster_set_count(struct swap_cluster_info *info,
+				     unsigned int c)
+{
+	info->data = c;
+}
+
+static inline void cluster_set_count_flag(struct swap_cluster_info *info,
+					 unsigned int c, unsigned int f)
+{
+	info->flags = f;
+	info->data = c;
+}
+
+static inline unsigned int cluster_next(struct swap_cluster_info *info)
+{
+	return info->data;
+}
+
+static inline void cluster_set_next(struct swap_cluster_info *info,
+				    unsigned int n)
+{
+	info->data = n;
+}
+
+static inline void cluster_set_next_flag(struct swap_cluster_info *info,
+					 unsigned int n, unsigned int f)
+{
+	info->flags = f;
+	info->data = n;
+}
+
+static inline bool cluster_is_free(struct swap_cluster_info *info)
+{
+	return info->flags & CLUSTER_FLAG_FREE;
+}
+
+static inline bool cluster_is_null(struct swap_cluster_info *info)
+{
+	return info->flags & CLUSTER_FLAG_NEXT_NULL;
+}
+
+static inline void cluster_set_null(struct swap_cluster_info *info)
+{
+	info->flags = CLUSTER_FLAG_NEXT_NULL;
+	info->data = 0;
+}
+
+/* Add a cluster to discard list and schedule it to do discard */
+static void swap_cluster_schedule_discard(struct swap_info_struct *si,
+		unsigned int idx)
+{
+	/*
+	 * If scan_swap_map() can't find a free cluster, it will check
+	 * si->swap_map directly. To make sure the discarding cluster isn't
+	 * taken by scan_swap_map(), mark the swap entries bad (occupied). It
+	 * will be cleared after discard
+	 */
+	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
+			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
+
+	if (cluster_is_null(&si->discard_cluster_head)) {
+		cluster_set_next_flag(&si->discard_cluster_head,
+						idx, 0);
+		cluster_set_next_flag(&si->discard_cluster_tail,
+						idx, 0);
+	} else {
+		unsigned int tail = cluster_next(&si->discard_cluster_tail);
+		cluster_set_next(&si->cluster_info[tail], idx);
+		cluster_set_next_flag(&si->discard_cluster_tail,
+						idx, 0);
+	}
+
+	schedule_work(&si->discard_work);
+}
+
+/*
+ * Do the actual discard. After a cluster discard finishes, the cluster is
+ * added to the free cluster list. The caller should hold si->lock.
+ */
+static void swap_do_scheduled_discard(struct swap_info_struct *si)
+{
+	struct swap_cluster_info *info;
+	unsigned int idx;
+
+	info = si->cluster_info;
+
+	while (!cluster_is_null(&si->discard_cluster_head)) {
+		idx = cluster_next(&si->discard_cluster_head);
+
+		cluster_set_next_flag(&si->discard_cluster_head,
+						cluster_next(&info[idx]), 0);
+		if (cluster_next(&si->discard_cluster_tail) == idx) {
+			cluster_set_null(&si->discard_cluster_head);
+			cluster_set_null(&si->discard_cluster_tail);
+		}
+		spin_unlock(&si->lock);
+
+		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
+				SWAPFILE_CLUSTER);
+
+		spin_lock(&si->lock);
+		cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
+		if (cluster_is_null(&si->free_cluster_head)) {
+			cluster_set_next_flag(&si->free_cluster_head,
+						idx, 0);
+			cluster_set_next_flag(&si->free_cluster_tail,
+						idx, 0);
+		} else {
+			unsigned int tail;
+
+			tail = cluster_next(&si->free_cluster_tail);
+			cluster_set_next(&info[tail], idx);
+			cluster_set_next_flag(&si->free_cluster_tail,
+						idx, 0);
+		}
+		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
+				0, SWAPFILE_CLUSTER);
+	}
+}
+
+static void swap_discard_work(struct work_struct *work)
+{
+	struct swap_info_struct *si;
+
+	si = container_of(work, struct swap_info_struct, discard_work);
+
+	spin_lock(&si->lock);
+	swap_do_scheduled_discard(si);
+	spin_unlock(&si->lock);
+}
+
+/*
+ * The cluster corresponding to page_nr will be used. The cluster will be
+ * removed from the free cluster list and its usage counter will be increased.
+ */
+static void inc_cluster_info_page(struct swap_info_struct *p,
+	struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+	if (!cluster_info)
+		return;
+	if (cluster_is_free(&cluster_info[idx])) {
+		VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
+		cluster_set_next_flag(&p->free_cluster_head,
+			cluster_next(&cluster_info[idx]), 0);
+		if (cluster_next(&p->free_cluster_tail) == idx) {
+			cluster_set_null(&p->free_cluster_tail);
+			cluster_set_null(&p->free_cluster_head);
+		}
+		cluster_set_count_flag(&cluster_info[idx], 0, 0);
+	}
+
+	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
+	cluster_set_count(&cluster_info[idx],
+		cluster_count(&cluster_info[idx]) + 1);
+}
+
+/*
+ * The cluster corresponding to page_nr drops one usage reference. If the
+ * usage counter becomes 0, meaning no page in the cluster is in use, we can
+ * optionally discard the cluster and add it to the free cluster list.
+ */
+static void dec_cluster_info_page(struct swap_info_struct *p,
+	struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+	if (!cluster_info)
+		return;
+
+	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
+	cluster_set_count(&cluster_info[idx],
+		cluster_count(&cluster_info[idx]) - 1);
+
+	if (cluster_count(&cluster_info[idx]) == 0) {
+		/*
+		 * If the swap is discardable, prepare to discard the cluster
+		 * instead of freeing it immediately. The cluster will be
+		 * freed after the discard.
+		 */
+		if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
+				 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
+			swap_cluster_schedule_discard(p, idx);
+			return;
+		}
+
+		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+		if (cluster_is_null(&p->free_cluster_head)) {
+			cluster_set_next_flag(&p->free_cluster_head, idx, 0);
+			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+		} else {
+			unsigned int tail = cluster_next(&p->free_cluster_tail);
+			cluster_set_next(&cluster_info[tail], idx);
+			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+		}
+	}
+}
+
+/*
+ * It's possible for scan_swap_map() to use a free cluster from the middle of
+ * the free cluster list. Detect that case and reset the per-cpu cluster to
+ * prevent list corruption.
+ */
+static bool
+scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
+	unsigned long offset)
+{
+	struct percpu_cluster *percpu_cluster;
+	bool conflict;
+
+	offset /= SWAPFILE_CLUSTER;
+	conflict = !cluster_is_null(&si->free_cluster_head) &&
+		offset != cluster_next(&si->free_cluster_head) &&
+		cluster_is_free(&si->cluster_info[offset]);
+
+	if (!conflict)
+		return false;
+
+	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
+	cluster_set_null(&percpu_cluster->index);
+	return true;
+}
+
+/*
+ * Try to get a swap entry from the current cpu's swap entry pool (a cluster).
+ * This might involve allocating a new cluster for the current CPU too.
+ */
+static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
+	unsigned long *offset, unsigned long *scan_base)
+{
+	struct percpu_cluster *cluster;
+	bool found_free;
+	unsigned long tmp;
+
+new_cluster:
+	cluster = this_cpu_ptr(si->percpu_cluster);
+	if (cluster_is_null(&cluster->index)) {
+		if (!cluster_is_null(&si->free_cluster_head)) {
+			cluster->index = si->free_cluster_head;
+			cluster->next = cluster_next(&cluster->index) *
+					SWAPFILE_CLUSTER;
+		} else if (!cluster_is_null(&si->discard_cluster_head)) {
+			/*
+			 * We have no free cluster but some clusters are being
+			 * discarded; do the discard now and reclaim them.
+			 */
+			swap_do_scheduled_discard(si);
+			*scan_base = *offset = si->cluster_next;
+			goto new_cluster;
+		} else
+			return;
+	}
+
+	found_free = false;
+
+	/*
+	 * Other CPUs can use our cluster if they can't find a free cluster,
+	 * so check whether there is still a free entry in the cluster.
+	 */
+	tmp = cluster->next;
+	while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
+	       SWAPFILE_CLUSTER) {
+		if (!si->swap_map[tmp]) {
+			found_free = true;
+			break;
+		}
+		tmp++;
+	}
+	if (!found_free) {
+		cluster_set_null(&cluster->index);
+		goto new_cluster;
+	}
+	cluster->next = tmp + 1;
+	*offset = tmp;
+	*scan_base = tmp;
+}
+
 static unsigned long scan_swap_map(struct swap_info_struct *si,
 				   unsigned char usage)
 {
@@ -191,7 +473,6 @@
 	unsigned long scan_base;
 	unsigned long last_in_cluster = 0;
 	int latency_ration = LATENCY_LIMIT;
-	int found_free_cluster = 0;
 
 	/*
 	 * We try to cluster swap pages by allocating them sequentially
@@ -207,24 +488,18 @@
 	si->flags += SWP_SCANNING;
 	scan_base = offset = si->cluster_next;
 
+	/* SSD algorithm */
+	if (si->cluster_info) {
+		scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
+		goto checks;
+	}
+
 	if (unlikely(!si->cluster_nr--)) {
 		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 			goto checks;
 		}
-		if (si->flags & SWP_PAGE_DISCARD) {
-			/*
-			 * Start range check on racing allocations, in case
-			 * they overlap the cluster we eventually decide on
-			 * (we scan without swap_lock to allow preemption).
-			 * It's hardly conceivable that cluster_nr could be
-			 * wrapped during our scan, but don't depend on it.
-			 */
-			if (si->lowest_alloc)
-				goto checks;
-			si->lowest_alloc = si->max;
-			si->highest_alloc = 0;
-		}
+
 		spin_unlock(&si->lock);
 
 		/*
@@ -248,7 +523,6 @@
 				offset -= SWAPFILE_CLUSTER - 1;
 				si->cluster_next = offset;
 				si->cluster_nr = SWAPFILE_CLUSTER - 1;
-				found_free_cluster = 1;
 				goto checks;
 			}
 			if (unlikely(--latency_ration < 0)) {
@@ -269,7 +543,6 @@
 				offset -= SWAPFILE_CLUSTER - 1;
 				si->cluster_next = offset;
 				si->cluster_nr = SWAPFILE_CLUSTER - 1;
-				found_free_cluster = 1;
 				goto checks;
 			}
 			if (unlikely(--latency_ration < 0)) {
@@ -281,10 +554,13 @@
 		offset = scan_base;
 		spin_lock(&si->lock);
 		si->cluster_nr = SWAPFILE_CLUSTER - 1;
-		si->lowest_alloc = 0;
 	}
 
 checks:
+	if (si->cluster_info) {
+		while (scan_swap_map_ssd_cluster_conflict(si, offset))
+			scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
+	}
 	if (!(si->flags & SWP_WRITEOK))
 		goto no_page;
 	if (!si->highest_bit)
@@ -317,62 +593,10 @@
 		si->highest_bit = 0;
 	}
 	si->swap_map[offset] = usage;
+	inc_cluster_info_page(si, si->cluster_info, offset);
 	si->cluster_next = offset + 1;
 	si->flags -= SWP_SCANNING;
 
-	if (si->lowest_alloc) {
-		/*
-		 * Only set when SWP_PAGE_DISCARD, and there's a scan
-		 * for a free cluster in progress or just completed.
-		 */
-		if (found_free_cluster) {
-			/*
-			 * To optimize wear-levelling, discard the
-			 * old data of the cluster, taking care not to
-			 * discard any of its pages that have already
-			 * been allocated by racing tasks (offset has
-			 * already stepped over any at the beginning).
-			 */
-			if (offset < si->highest_alloc &&
-			    si->lowest_alloc <= last_in_cluster)
-				last_in_cluster = si->lowest_alloc - 1;
-			si->flags |= SWP_DISCARDING;
-			spin_unlock(&si->lock);
-
-			if (offset < last_in_cluster)
-				discard_swap_cluster(si, offset,
-					last_in_cluster - offset + 1);
-
-			spin_lock(&si->lock);
-			si->lowest_alloc = 0;
-			si->flags &= ~SWP_DISCARDING;
-
-			smp_mb();	/* wake_up_bit advises this */
-			wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
-
-		} else if (si->flags & SWP_DISCARDING) {
-			/*
-			 * Delay using pages allocated by racing tasks
-			 * until the whole discard has been issued. We
-			 * could defer that delay until swap_writepage,
-			 * but it's easier to keep this self-contained.
-			 */
-			spin_unlock(&si->lock);
-			wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
-				wait_for_discard, TASK_UNINTERRUPTIBLE);
-			spin_lock(&si->lock);
-		} else {
-			/*
-			 * Note pages allocated by racing tasks while
-			 * scan for a free cluster is in progress, so
-			 * that its final discard can exclude them.
-			 */
-			if (offset < si->lowest_alloc)
-				si->lowest_alloc = offset;
-			if (offset > si->highest_alloc)
-				si->highest_alloc = offset;
-		}
-	}
 	return offset;
 
 scan:
@@ -527,16 +751,16 @@
 	return p;
 
 bad_free:
-	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
+	pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
 	goto out;
 bad_offset:
-	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
+	pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
 	goto out;
 bad_device:
-	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
+	pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
 	goto out;
 bad_nofile:
-	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
+	pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
 out:
 	return NULL;
 }
@@ -600,6 +824,7 @@
 
 	/* free if no reference */
 	if (!usage) {
+		dec_cluster_info_page(p, p->cluster_info, offset);
 		if (offset < p->lowest_bit)
 			p->lowest_bit = offset;
 		if (offset > p->highest_bit)
@@ -1107,7 +1332,7 @@
 			else
 				continue;
 		}
-		count = si->swap_map[i];
+		count = ACCESS_ONCE(si->swap_map[i]);
 		if (count && swap_count(count) != SWAP_MAP_BAD)
 			break;
 	}
@@ -1127,7 +1352,11 @@
 {
 	struct swap_info_struct *si = swap_info[type];
 	struct mm_struct *start_mm;
-	unsigned char *swap_map;
+	volatile unsigned char *swap_map; /* swap_map is accessed without
+					   * locking. Mark it as volatile
+					   * to prevent the compiler from
+					   * doing something odd.
+					   */
 	unsigned char swcount;
 	struct page *page;
 	swp_entry_t entry;
@@ -1178,7 +1407,15 @@
 			 * reused since sys_swapoff() already disabled
 			 * allocation from here, or alloc_page() failed.
 			 */
-			if (!*swap_map)
+			swcount = *swap_map;
+			/*
+			 * We don't hold a lock here, so the swap entry could
+			 * be SWAP_MAP_BAD (when the cluster is being
+			 * discarded). Instead of failing out, we can just
+			 * skip the entry, because swapoff will wait for the
+			 * discard to finish anyway.
+			 */
+			if (!swcount || swcount == SWAP_MAP_BAD)
 				continue;
 			retval = -ENOMEM;
 			break;
@@ -1524,7 +1761,8 @@
 }
 
 static void _enable_swap_info(struct swap_info_struct *p, int prio,
-				unsigned char *swap_map)
+				unsigned char *swap_map,
+				struct swap_cluster_info *cluster_info)
 {
 	int i, prev;
 
@@ -1533,6 +1771,7 @@
 	else
 		p->prio = --least_priority;
 	p->swap_map = swap_map;
+	p->cluster_info = cluster_info;
 	p->flags |= SWP_WRITEOK;
 	atomic_long_add(p->pages, &nr_swap_pages);
 	total_swap_pages += p->pages;
@@ -1553,12 +1792,13 @@
 
 static void enable_swap_info(struct swap_info_struct *p, int prio,
 				unsigned char *swap_map,
+				struct swap_cluster_info *cluster_info,
 				unsigned long *frontswap_map)
 {
 	frontswap_init(p->type, frontswap_map);
 	spin_lock(&swap_lock);
 	spin_lock(&p->lock);
-	 _enable_swap_info(p, prio, swap_map);
+	 _enable_swap_info(p, prio, swap_map, cluster_info);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
 }
@@ -1567,7 +1807,7 @@
 {
 	spin_lock(&swap_lock);
 	spin_lock(&p->lock);
-	_enable_swap_info(p, p->prio, p->swap_map);
+	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
 }
@@ -1576,6 +1816,7 @@
 {
 	struct swap_info_struct *p = NULL;
 	unsigned char *swap_map;
+	struct swap_cluster_info *cluster_info;
 	unsigned long *frontswap_map;
 	struct file *swap_file, *victim;
 	struct address_space *mapping;
@@ -1651,6 +1892,8 @@
 		goto out_dput;
 	}
 
+	flush_work(&p->discard_work);
+
 	destroy_swap_extents(p);
 	if (p->flags & SWP_CONTINUED)
 		free_swap_count_continuations(p);
@@ -1675,6 +1918,8 @@
 	p->max = 0;
 	swap_map = p->swap_map;
 	p->swap_map = NULL;
+	cluster_info = p->cluster_info;
+	p->cluster_info = NULL;
 	p->flags = 0;
 	frontswap_map = frontswap_map_get(p);
 	frontswap_map_set(p, NULL);
@@ -1682,7 +1927,10 @@
 	spin_unlock(&swap_lock);
 	frontswap_invalidate_area(type);
 	mutex_unlock(&swapon_mutex);
+	free_percpu(p->percpu_cluster);
+	p->percpu_cluster = NULL;
 	vfree(swap_map);
+	vfree(cluster_info);
 	vfree(frontswap_map);
 	/* Destroy swap account information */
 	swap_cgroup_swapoff(type);
@@ -1926,9 +2174,10 @@
 	int i;
 	unsigned long maxpages;
 	unsigned long swapfilepages;
+	unsigned long last_page;
 
 	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
-		printk(KERN_ERR "Unable to find swap-space signature\n");
+		pr_err("Unable to find swap-space signature\n");
 		return 0;
 	}
 
@@ -1942,9 +2191,8 @@
 	}
 	/* Check the swap header's sub-version */
 	if (swap_header->info.version != 1) {
-		printk(KERN_WARNING
-		       "Unable to handle swap header version %d\n",
-		       swap_header->info.version);
+		pr_warn("Unable to handle swap header version %d\n",
+			swap_header->info.version);
 		return 0;
 	}
 
@@ -1968,8 +2216,14 @@
 	 */
 	maxpages = swp_offset(pte_to_swp_entry(
 			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
-	if (maxpages > swap_header->info.last_page) {
-		maxpages = swap_header->info.last_page + 1;
+	last_page = swap_header->info.last_page;
+	if (last_page > maxpages) {
+		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
+			maxpages << (PAGE_SHIFT - 10),
+			last_page << (PAGE_SHIFT - 10));
+	}
+	if (maxpages > last_page) {
+		maxpages = last_page + 1;
 		/* p->max is an unsigned int: don't overflow it */
 		if ((unsigned int)maxpages == 0)
 			maxpages = UINT_MAX;
@@ -1980,8 +2234,7 @@
 		return 0;
 	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
 	if (swapfilepages && maxpages > swapfilepages) {
-		printk(KERN_WARNING
-		       "Swap area shorter than signature indicates\n");
+		pr_warn("Swap area shorter than signature indicates\n");
 		return 0;
 	}
 	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
@@ -1995,15 +2248,23 @@
 static int setup_swap_map_and_extents(struct swap_info_struct *p,
 					union swap_header *swap_header,
 					unsigned char *swap_map,
+					struct swap_cluster_info *cluster_info,
 					unsigned long maxpages,
 					sector_t *span)
 {
 	int i;
 	unsigned int nr_good_pages;
 	int nr_extents;
+	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+	unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
 
 	nr_good_pages = maxpages - 1;	/* omit header page */
 
+	cluster_set_null(&p->free_cluster_head);
+	cluster_set_null(&p->free_cluster_tail);
+	cluster_set_null(&p->discard_cluster_head);
+	cluster_set_null(&p->discard_cluster_tail);
+
 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
 		unsigned int page_nr = swap_header->info.badpages[i];
 		if (page_nr == 0 || page_nr > swap_header->info.last_page)
@@ -2011,11 +2272,25 @@
 		if (page_nr < maxpages) {
 			swap_map[page_nr] = SWAP_MAP_BAD;
 			nr_good_pages--;
+			/*
+			 * Haven't marked the cluster free yet, no list
+			 * operation involved
+			 */
+			inc_cluster_info_page(p, cluster_info, page_nr);
 		}
 	}
 
+	/* Haven't marked the cluster free yet, no list operation involved */
+	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+		inc_cluster_info_page(p, cluster_info, i);
+
 	if (nr_good_pages) {
 		swap_map[0] = SWAP_MAP_BAD;
+		/*
+		 * The cluster isn't marked free yet, so no list
+		 * operation is involved.
+		 */
+		inc_cluster_info_page(p, cluster_info, 0);
 		p->max = maxpages;
 		p->pages = nr_good_pages;
 		nr_extents = setup_swap_extents(p, span);
@@ -2024,10 +2299,34 @@
 		nr_good_pages = p->pages;
 	}
 	if (!nr_good_pages) {
-		printk(KERN_WARNING "Empty swap-file\n");
+		pr_warn("Empty swap-file\n");
 		return -EINVAL;
 	}
 
+	if (!cluster_info)
+		return nr_extents;
+
+	for (i = 0; i < nr_clusters; i++) {
+		if (!cluster_count(&cluster_info[idx])) {
+			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+			if (cluster_is_null(&p->free_cluster_head)) {
+				cluster_set_next_flag(&p->free_cluster_head,
+								idx, 0);
+				cluster_set_next_flag(&p->free_cluster_tail,
+								idx, 0);
+			} else {
+				unsigned int tail;
+
+				tail = cluster_next(&p->free_cluster_tail);
+				cluster_set_next(&cluster_info[tail], idx);
+				cluster_set_next_flag(&p->free_cluster_tail,
+								idx, 0);
+			}
+		}
+		idx++;
+		if (idx == nr_clusters)
+			idx = 0;
+	}
 	return nr_extents;
 }
 
@@ -2059,6 +2358,7 @@
 	sector_t span;
 	unsigned long maxpages;
 	unsigned char *swap_map = NULL;
+	struct swap_cluster_info *cluster_info = NULL;
 	unsigned long *frontswap_map = NULL;
 	struct page *page = NULL;
 	struct inode *inode = NULL;
@@ -2073,6 +2373,8 @@
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	INIT_WORK(&p->discard_work, swap_discard_work);
+
 	name = getname(specialfile);
 	if (IS_ERR(name)) {
 		error = PTR_ERR(name);
@@ -2132,13 +2434,38 @@
 		error = -ENOMEM;
 		goto bad_swap;
 	}
+	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+		p->flags |= SWP_SOLIDSTATE;
+		/*
+		 * select a random position to start with to help wear leveling
+		 * SSD
+		 */
+		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
+
+		cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
+			SWAPFILE_CLUSTER) * sizeof(*cluster_info));
+		if (!cluster_info) {
+			error = -ENOMEM;
+			goto bad_swap;
+		}
+		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
+		if (!p->percpu_cluster) {
+			error = -ENOMEM;
+			goto bad_swap;
+		}
+		for_each_possible_cpu(i) {
+			struct percpu_cluster *cluster;
+			cluster = per_cpu_ptr(p->percpu_cluster, i);
+			cluster_set_null(&cluster->index);
+		}
+	}
 
 	error = swap_cgroup_swapon(p->type, maxpages);
 	if (error)
 		goto bad_swap;
 
 	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
-		maxpages, &span);
+		cluster_info, maxpages, &span);
 	if (unlikely(nr_extents < 0)) {
 		error = nr_extents;
 		goto bad_swap;
@@ -2147,41 +2474,33 @@
 	if (frontswap_enabled)
 		frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
 
-	if (p->bdev) {
-		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-			p->flags |= SWP_SOLIDSTATE;
-			p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
-		}
+	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
+		/*
+		 * When discard is enabled for swap with no particular
+		 * policy flagged, we set all swap discard flags here in
+		 * order to sustain backward compatibility with older
+		 * swapon(8) releases.
+		 */
+		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
+			     SWP_PAGE_DISCARD);
 
-		if ((swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
-			/*
-			 * When discard is enabled for swap with no particular
-			 * policy flagged, we set all swap discard flags here in
-			 * order to sustain backward compatibility with older
-			 * swapon(8) releases.
-			 */
-			p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
-				     SWP_PAGE_DISCARD);
+		/*
+		 * By flagging sys_swapon, a sysadmin can tell us to
+		 * either do single-time area discards only, or to just
+		 * perform discards for released swap page-clusters.
+		 * Now it's time to adjust the p->flags accordingly.
+		 */
+		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
+			p->flags &= ~SWP_PAGE_DISCARD;
+		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
+			p->flags &= ~SWP_AREA_DISCARD;
 
-			/*
-			 * By flagging sys_swapon, a sysadmin can tell us to
-			 * either do single-time area discards only, or to just
-			 * perform discards for released swap page-clusters.
-			 * Now it's time to adjust the p->flags accordingly.
-			 */
-			if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
-				p->flags &= ~SWP_PAGE_DISCARD;
-			else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
-				p->flags &= ~SWP_AREA_DISCARD;
-
-			/* issue a swapon-time discard if it's still required */
-			if (p->flags & SWP_AREA_DISCARD) {
-				int err = discard_swap(p);
-				if (unlikely(err))
-					printk(KERN_ERR
-					       "swapon: discard_swap(%p): %d\n",
-						p, err);
-			}
+		/* issue a swapon-time discard if it's still required */
+		if (p->flags & SWP_AREA_DISCARD) {
+			int err = discard_swap(p);
+			if (unlikely(err))
+				pr_err("swapon: discard_swap(%p): %d\n",
+					p, err);
 		}
 	}
 
@@ -2190,9 +2509,9 @@
 	if (swap_flags & SWAP_FLAG_PREFER)
 		prio =
 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
-	enable_swap_info(p, prio, swap_map, frontswap_map);
+	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
 
-	printk(KERN_INFO "Adding %uk swap on %s.  "
+	pr_info("Adding %uk swap on %s.  "
 			"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
 		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
 		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
@@ -2211,6 +2530,8 @@
 	error = 0;
 	goto out;
 bad_swap:
+	free_percpu(p->percpu_cluster);
+	p->percpu_cluster = NULL;
 	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
 		set_blocksize(p->bdev, p->old_block_size);
 		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
@@ -2222,6 +2543,7 @@
 	p->flags = 0;
 	spin_unlock(&swap_lock);
 	vfree(swap_map);
+	vfree(cluster_info);
 	if (swap_file) {
 		if (inode && S_ISREG(inode->i_mode)) {
 			mutex_unlock(&inode->i_mutex);
@@ -2291,6 +2613,16 @@
 		goto unlock_out;
 
 	count = p->swap_map[offset];
+
+	/*
+	 * swapin_readahead() doesn't check if a swap entry is valid, so the
+	 * swap entry could be SWAP_MAP_BAD. Check here with the lock held.
+	 */
+	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
+		err = -ENOENT;
+		goto unlock_out;
+	}
+
 	has_cache = count & SWAP_HAS_CACHE;
 	count &= ~SWAP_HAS_CACHE;
 	err = 0;
@@ -2326,7 +2658,7 @@
 	return err;
 
 bad_file:
-	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
+	pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
 	goto out;
 }
 
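
A design note on the allocator introduced above: struct swap_cluster_info
carries no pointers. Its single data field holds either a cluster's usage
count or the index of the next cluster, with flags saying which, so the free
and discard lists are threaded through the cluster array itself. Below is a
compact userspace sketch of such an index-linked free list; the types and the
single-sentinel layout are simplified stand-ins for the kernel's head/tail
scheme.

#include <stdio.h>

#define FLAG_FREE	1u
#define FLAG_NEXT_NULL	2u

struct cluster {		/* analogue of swap_cluster_info */
	unsigned int flags;
	unsigned int data;	/* usage count, or index of next free cluster */
};

/* Pop the head of a free list that is threaded through the array itself. */
static int pop_free(struct cluster *head, struct cluster *info)
{
	unsigned int idx;

	if (head->flags & FLAG_NEXT_NULL)
		return -1;			/* list is empty */
	idx = head->data;
	head->data = info[idx].data;		/* unlink the head */
	if (info[idx].flags & FLAG_NEXT_NULL)
		head->flags = FLAG_NEXT_NULL;	/* popped the last one */
	info[idx].flags = 0;			/* cluster is now in use */
	info[idx].data = 0;			/* usage count starts at 0 */
	return (int)idx;
}

int main(void)
{
	struct cluster info[3] = {
		{ FLAG_FREE, 1 },
		{ FLAG_FREE, 2 },
		{ FLAG_FREE | FLAG_NEXT_NULL, 0 },
	};
	struct cluster head = { 0, 0 };		/* list: 0 -> 1 -> 2 */

	printf("got cluster %d\n", pop_free(&head, info));	/* 0 */
	printf("got cluster %d\n", pop_free(&head, info));	/* 1 */
	return 0;
}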
diff --git a/mm/util.c b/mm/util.c
index 7441c41..eaf63fc2 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -388,15 +388,12 @@
 	struct address_space *mapping = page->mapping;
 
 	VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
 	if (unlikely(PageSwapCache(page))) {
 		swp_entry_t entry;
 
 		entry.val = page_private(page);
 		mapping = swap_address_space(entry);
-	} else
-#endif
-	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 		mapping = NULL;
 	return mapping;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13a5495..1074543 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -752,7 +752,6 @@
 struct vmap_block {
 	spinlock_t lock;
 	struct vmap_area *va;
-	struct vmap_block_queue *vbq;
 	unsigned long free, dirty;
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
 	struct list_head free_list;
@@ -830,7 +829,6 @@
 	radix_tree_preload_end();
 
 	vbq = &get_cpu_var(vmap_block_queue);
-	vb->vbq = vbq;
 	spin_lock(&vbq->lock);
 	list_add_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
@@ -1018,15 +1016,16 @@
 
 		rcu_read_lock();
 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-			int i;
+			int i, j;
 
 			spin_lock(&vb->lock);
 			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
-			while (i < VMAP_BBMAP_BITS) {
+			if (i < VMAP_BBMAP_BITS) {
 				unsigned long s, e;
-				int j;
-				j = find_next_zero_bit(vb->dirty_map,
-					VMAP_BBMAP_BITS, i);
+
+				j = find_last_bit(vb->dirty_map,
+							VMAP_BBMAP_BITS);
+				j = j + 1; /* need exclusive index */
 
 				s = vb->va->va_start + (i << PAGE_SHIFT);
 				e = vb->va->va_start + (j << PAGE_SHIFT);
@@ -1036,10 +1035,6 @@
 					start = s;
 				if (e > end)
 					end = e;
-
-				i = j;
-				i = find_next_bit(vb->dirty_map,
-							VMAP_BBMAP_BITS, i);
 			}
 			spin_unlock(&vb->lock);
 		}
@@ -1263,7 +1258,7 @@
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
-	unsigned long end = addr + area->size - PAGE_SIZE;
+	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
 	err = vmap_page_range(addr, end, prot, *pages);
@@ -1558,7 +1553,7 @@
 	unsigned int nr_pages, array_size, i;
 	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 
-	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
+	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
 
 	area->nr_pages = nr_pages;
@@ -1990,7 +1985,7 @@
 
 		vm = va->vm;
 		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + vm->size - PAGE_SIZE)
+		if (addr >= vaddr + get_vm_area_size(vm))
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -2000,7 +1995,7 @@
 			addr++;
 			count--;
 		}
-		n = vaddr + vm->size - PAGE_SIZE - addr;
+		n = vaddr + get_vm_area_size(vm) - addr;
 		if (n > count)
 			n = count;
 		if (!(vm->flags & VM_IOREMAP))
@@ -2072,7 +2067,7 @@
 
 		vm = va->vm;
 		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + vm->size - PAGE_SIZE)
+		if (addr >= vaddr + get_vm_area_size(vm))
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -2081,7 +2076,7 @@
 			addr++;
 			count--;
 		}
-		n = vaddr + vm->size - PAGE_SIZE - addr;
+		n = vaddr + get_vm_area_size(vm) - addr;
 		if (n > count)
 			n = count;
 		if (!(vm->flags & VM_IOREMAP)) {
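
The vmap flush change above trades precision for simplicity: rather than
walking every run of dirty bits and unioning many small ranges, it takes one
conservative span from the first dirty bit to the last. A userspace sketch of
the range computation; find_first_bit()/find_last_bit() are the kernel
helpers, and the plain byte array here is an illustrative stand-in for the
bitmap.

#include <stdio.h>

#define NBITS 16

/* Conservative flush range: [first set bit, last set bit + 1).
 * One larger range replaces many small ones, trading some overshoot
 * for a single pass over the dirty map. */
static int dirty_range(const unsigned char *dirty, int *s, int *e)
{
	int i, first = -1, last = -1;

	for (i = 0; i < NBITS; i++)
		if (dirty[i]) {
			if (first < 0)
				first = i;
			last = i;
		}
	if (first < 0)
		return 0;		/* nothing dirty */
	*s = first;
	*e = last + 1;			/* exclusive end, as in the patch */
	return 1;
}

int main(void)
{
	unsigned char dirty[NBITS] = { 0 };
	int s, e;

	dirty[3] = dirty[4] = dirty[9] = 1;
	if (dirty_range(dirty, &s, &e))
		printf("flush pages [%d, %d)\n", s, e);	/* [3, 10) */
	return 0;
}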
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cff0d4..fe715da 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -146,6 +146,25 @@
 }
 #endif
 
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+	int nr;
+
+	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+	     zone_page_state(zone, NR_INACTIVE_FILE);
+
+	if (get_nr_swap_pages() > 0)
+		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		      zone_page_state(zone, NR_INACTIVE_ANON);
+
+	return nr;
+}
+
+bool zone_reclaimable(struct zone *zone)
+{
+	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	if (!mem_cgroup_disabled())
@@ -545,7 +564,7 @@
  */
 void putback_lru_page(struct page *page)
 {
-	int lru;
+	bool is_unevictable;
 	int was_unevictable = PageUnevictable(page);
 
 	VM_BUG_ON(PageLRU(page));
@@ -560,14 +579,14 @@
 		 * unevictable page on [in]active list.
 		 * We know how to handle that.
 		 */
-		lru = page_lru_base_type(page);
+		is_unevictable = false;
 		lru_cache_add(page);
 	} else {
 		/*
 		 * Put unevictable pages directly on zone's unevictable
 		 * list.
 		 */
-		lru = LRU_UNEVICTABLE;
+		is_unevictable = true;
 		add_page_to_unevictable_list(page);
 		/*
 		 * When racing with an mlock or AS_UNEVICTABLE clearing
@@ -587,7 +606,7 @@
 	 * page is on unevictable list, it never be freed. To avoid that,
 	 * check after we added it to the list, again.
 	 */
-	if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
+	if (is_unevictable && page_evictable(page)) {
 		if (!isolate_lru_page(page)) {
 			put_page(page);
 			goto redo;
@@ -598,9 +617,9 @@
 		 */
 	}
 
-	if (was_unevictable && lru != LRU_UNEVICTABLE)
+	if (was_unevictable && !is_unevictable)
 		count_vm_event(UNEVICTABLE_PGRESCUED);
-	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+	else if (!was_unevictable && is_unevictable)
 		count_vm_event(UNEVICTABLE_PGCULLED);
 
 	put_page(page);		/* drop ref from isolate */
@@ -1789,7 +1808,7 @@
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (current_is_kswapd() && zone->all_unreclaimable)
+	if (current_is_kswapd() && !zone_reclaimable(zone))
 		force_scan = true;
 	if (!global_reclaim(sc))
 		force_scan = true;
@@ -2244,8 +2263,8 @@
 		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
-			if (zone->all_unreclaimable &&
-					sc->priority != DEF_PRIORITY)
+			if (sc->priority != DEF_PRIORITY &&
+			    !zone_reclaimable(zone))
 				continue;	/* Let kswapd poll it */
 			if (IS_ENABLED(CONFIG_COMPACTION)) {
 				/*
@@ -2283,11 +2302,6 @@
 	return aborted_reclaim;
 }
 
-static bool zone_reclaimable(struct zone *zone)
-{
-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
-}
-
 /* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
 		struct scan_control *sc)
@@ -2301,7 +2315,7 @@
 			continue;
 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
-		if (!zone->all_unreclaimable)
+		if (zone_reclaimable(zone))
 			return false;
 	}
 
@@ -2712,7 +2726,7 @@
 		 * DEF_PRIORITY. Effectively, it considers them balanced so
 		 * they must be considered balanced here as well!
 		 */
-		if (zone->all_unreclaimable) {
+		if (!zone_reclaimable(zone)) {
 			balanced_pages += zone->managed_pages;
 			continue;
 		}
@@ -2773,7 +2787,6 @@
 			       unsigned long lru_pages,
 			       unsigned long *nr_attempted)
 {
-	unsigned long nr_slab;
 	int testorder = sc->order;
 	unsigned long balance_gap;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
@@ -2818,15 +2831,12 @@
 	shrink_zone(zone, sc);
 
 	reclaim_state->reclaimed_slab = 0;
-	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+	shrink_slab(&shrink, sc->nr_scanned, lru_pages);
 	sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 
 	/* Account for the number of pages attempted to reclaim */
 	*nr_attempted += sc->nr_to_reclaim;
 
-	if (nr_slab == 0 && !zone_reclaimable(zone))
-		zone->all_unreclaimable = 1;
-
 	zone_clear_flag(zone, ZONE_WRITEBACK);
 
 	/*
@@ -2835,7 +2845,7 @@
 	 * BDIs but as pressure is relieved, speculatively avoid congestion
 	 * waits.
 	 */
-	if (!zone->all_unreclaimable &&
+	if (zone_reclaimable(zone) &&
 	    zone_balanced(zone, testorder, 0, classzone_idx)) {
 		zone_clear_flag(zone, ZONE_CONGESTED);
 		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
@@ -2901,8 +2911,8 @@
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable &&
-			    sc.priority != DEF_PRIORITY)
+			if (sc.priority != DEF_PRIORITY &&
+			    !zone_reclaimable(zone))
 				continue;
 
 			/*
@@ -2980,8 +2990,8 @@
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable &&
-			    sc.priority != DEF_PRIORITY)
+			if (sc.priority != DEF_PRIORITY &&
+			    !zone_reclaimable(zone))
 				continue;
 
 			sc.nr_scanned = 0;
@@ -3237,7 +3247,7 @@
 	}
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+	if (zone_balanced(zone, order, 0, 0))
 		return;
 
 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
@@ -3265,20 +3275,6 @@
 	return nr;
 }
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
-	int nr;
-
-	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-	     zone_page_state(zone, NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-		      zone_page_state(zone, NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -3576,7 +3572,7 @@
 	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
 		return ZONE_RECLAIM_FULL;
 
-	if (zone->all_unreclaimable)
+	if (!zone_reclaimable(zone))
 		return ZONE_RECLAIM_FULL;
 
 	/*
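
The recurring vmscan.c substitution above replaces the sticky
zone->all_unreclaimable flag with a live ratio check: a zone counts as
reclaimable while fewer than six times its reclaimable pages have been
scanned. A sketch of the heuristic with a worked number; the factor 6 is
from the code, the sample values are made up.

#include <stdio.h>

/* A zone with 1000 reclaimable pages is given up on only once
 * pages_scanned reaches 6000. */
static int zone_seems_reclaimable(unsigned long pages_scanned,
				  unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	printf("%d %d\n", zone_seems_reclaimable(5999, 1000),	/* 1 */
			  zone_seems_reclaimable(6000, 1000));	/* 0 */
	return 0;
}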
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 20c2ef4..9bb3145 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,6 +19,9 @@
 #include <linux/math64.h>
 #include <linux/writeback.h>
 #include <linux/compaction.h>
+#include <linux/mm_inline.h>
+
+#include "internal.h"
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -414,12 +417,17 @@
 EXPORT_SYMBOL(dec_zone_page_state);
 #endif
 
+static inline void fold_diff(int *diff)
+{
+	int i;
+
+	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+		if (diff[i])
+			atomic_long_add(diff[i], &vm_stat[i]);
+}
+
 /*
- * Update the zone counters for one cpu.
- *
- * The cpu specified must be either the current cpu or a processor that
- * is not online. If it is the current cpu then the execution thread must
- * be pinned to the current cpu.
+ * Update the zone counters for the current cpu.
  *
  * Note that refresh_cpu_vm_stats strives to only access
  * node local memory. The per cpu pagesets on remote zones are placed
@@ -432,7 +440,67 @@
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
  */
-void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
+{
+	struct zone *zone;
+	int i;
+	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
+
+	for_each_populated_zone(zone) {
+		struct per_cpu_pageset __percpu *p = zone->pageset;
+
+		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+			int v;
+
+			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+			if (v) {
+
+				atomic_long_add(v, &zone->vm_stat[i]);
+				global_diff[i] += v;
+#ifdef CONFIG_NUMA
+				/* 3 seconds idle till flush */
+				__this_cpu_write(p->expire, 3);
+#endif
+			}
+		}
+		cond_resched();
+#ifdef CONFIG_NUMA
+		/*
+		 * Deal with draining the remote pageset of this
+		 * processor
+		 *
+		 * Check if there are pages remaining in this pageset;
+		 * if not, then there is nothing to expire.
+		 */
+		if (!__this_cpu_read(p->expire) ||
+		    !__this_cpu_read(p->pcp.count))
+			continue;
+
+		/*
+		 * We never drain zones local to this processor.
+		 */
+		if (zone_to_nid(zone) == numa_node_id()) {
+			__this_cpu_write(p->expire, 0);
+			continue;
+		}
+
+		if (__this_cpu_dec_return(p->expire))
+			continue;
+
+		if (__this_cpu_read(p->pcp.count))
+			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
+#endif
+	}
+	fold_diff(global_diff);
+}
+
+/*
+ * Fold the data for an offline cpu into the global array.
+ * There cannot be any access by the offline cpu and therefore
+ * synchronization is simplified.
+ */
+void cpu_vm_stats_fold(int cpu)
 {
 	struct zone *zone;
 	int i;
@@ -445,52 +513,16 @@
 
 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 			if (p->vm_stat_diff[i]) {
-				unsigned long flags;
 				int v;
 
-				local_irq_save(flags);
 				v = p->vm_stat_diff[i];
 				p->vm_stat_diff[i] = 0;
-				local_irq_restore(flags);
 				atomic_long_add(v, &zone->vm_stat[i]);
 				global_diff[i] += v;
-#ifdef CONFIG_NUMA
-				/* 3 seconds idle till flush */
-				p->expire = 3;
-#endif
 			}
-		cond_resched();
-#ifdef CONFIG_NUMA
-		/*
-		 * Deal with draining the remote pageset of this
-		 * processor
-		 *
-		 * Check if there are pages remaining in this pageset
-		 * if not then there is nothing to expire.
-		 */
-		if (!p->expire || !p->pcp.count)
-			continue;
-
-		/*
-		 * We never drain zones local to this processor.
-		 */
-		if (zone_to_nid(zone) == numa_node_id()) {
-			p->expire = 0;
-			continue;
-		}
-
-		p->expire--;
-		if (p->expire)
-			continue;
-
-		if (p->pcp.count)
-			drain_zone_pages(zone, &p->pcp);
-#endif
 	}
 
-	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		if (global_diff[i])
-			atomic_long_add(global_diff[i], &vm_stat[i]);
+	fold_diff(global_diff);
 }
 
 /*
@@ -703,6 +735,7 @@
 const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_free_pages",
+	"nr_alloc_batch",
 	"nr_inactive_anon",
 	"nr_active_anon",
 	"nr_inactive_file",
@@ -817,6 +850,12 @@
 	"thp_zero_page_alloc",
 	"thp_zero_page_alloc_failed",
 #endif
+#ifdef CONFIG_SMP
+	"nr_tlb_remote_flush",
+	"nr_tlb_remote_flush_received",
+#endif
+	"nr_tlb_local_flush_all",
+	"nr_tlb_local_flush_one",
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
@@ -1052,7 +1091,7 @@
 		   "\n  all_unreclaimable: %u"
 		   "\n  start_pfn:         %lu"
 		   "\n  inactive_ratio:    %u",
-		   zone->all_unreclaimable,
+		   !zone_reclaimable(zone),
 		   zone->zone_start_pfn,
 		   zone->inactive_ratio);
 	seq_putc(m, '\n');
@@ -1177,7 +1216,7 @@
 
 static void vmstat_update(struct work_struct *w)
 {
-	refresh_cpu_vm_stats(smp_processor_id());
+	refresh_cpu_vm_stats();
 	schedule_delayed_work(&__get_cpu_var(vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }
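
The refresh_cpu_vm_stats() rewrite above works because it now runs only on
the local CPU and grabs each per-cpu delta with this_cpu_xchg(), which reads
the delta and resets it to zero in one step, so the old local_irq_save/restore
pair can go. A userspace analogue of the read-and-clear idiom using C11
atomics (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int vm_stat_diff;	/* stands in for p->vm_stat_diff[i] */
static _Atomic long vm_stat;		/* stands in for the global counter */

/* Fold the local delta into the global counter.  The exchange both
 * fetches the delta and zeroes it, so no lock or irq-off section is
 * needed around the pair of operations. */
static void fold_one(void)
{
	int v = atomic_exchange(&vm_stat_diff, 0);

	if (v)
		atomic_fetch_add(&vm_stat, v);
}

int main(void)
{
	atomic_store(&vm_stat_diff, 5);
	fold_one();
	printf("global = %ld\n", atomic_load(&vm_stat));	/* 5 */
	return 0;
}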
diff --git a/mm/zbud.c b/mm/zbud.c
index ad1e781..9451361 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -16,7 +16,7 @@
  *
  * zbud works by storing compressed pages, or "zpages", together in pairs in a
  * single memory page called a "zbud page".  The first buddy is "left
- * justifed" at the beginning of the zbud page, and the last buddy is "right
+ * justified" at the beginning of the zbud page, and the last buddy is "right
  * justified" at the end of the zbud page.  The benefit is that if either
  * buddy is freed, the freed buddy space, coalesced with whatever slack space
  * that existed between the buddies, results in the largest possible free region
@@ -243,7 +243,7 @@
  * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
  * as zbud pool pages.
  *
- * Return: 0 if success and handle is set, otherwise -EINVAL is the size or
+ * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
  * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
  * a new page.
  */
diff --git a/mm/zswap.c b/mm/zswap.c
index deda2b6..841e35f 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -409,7 +409,7 @@
 				struct page **retpage)
 {
 	struct page *found_page, *new_page = NULL;
-	struct address_space *swapper_space = &swapper_spaces[swp_type(entry)];
+	struct address_space *swapper_space = swap_address_space(entry);
 	int err;
 
 	*retpage = NULL;
@@ -790,26 +790,14 @@
 static void zswap_frontswap_invalidate_area(unsigned type)
 {
 	struct zswap_tree *tree = zswap_trees[type];
-	struct rb_node *node;
-	struct zswap_entry *entry;
+	struct zswap_entry *entry, *n;
 
 	if (!tree)
 		return;
 
 	/* walk the tree and free everything */
 	spin_lock(&tree->lock);
-	/*
-	 * TODO: Even though this code should not be executed because
-	 * the try_to_unuse() in swapoff should have emptied the tree,
-	 * it is very wasteful to rebalance the tree after every
-	 * removal when we are freeing the whole tree.
-	 *
-	 * If post-order traversal code is ever added to the rbtree
-	 * implementation, it should be used here.
-	 */
-	while ((node = rb_first(&tree->rbroot))) {
-		entry = rb_entry(node, struct zswap_entry, rbnode);
-		rb_erase(&entry->rbnode, &tree->rbroot);
+	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
 		zbud_free(tree->pool, entry->handle);
 		zswap_entry_cache_free(entry);
 		atomic_dec(&zswap_stored_pages);
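
The zswap hunk above relies on a property of postorder traversal: children
are visited before their parent, so every node can be freed the moment it is
visited and no freed pointer is ever followed. That is why the per-node
rb_erase(), and the rebalancing it triggers, can be dropped entirely. A
recursive userspace sketch of the same idea; the kernel macro is iterative
and rbtree-specific, this plain binary tree is only illustrative.

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *left, *right;
	int key;
};

/* Postorder visit: both children are handled before the parent, so the
 * parent may be freed immediately and no dangling pointer is followed. */
static void free_postorder(struct node *n)
{
	if (!n)
		return;
	free_postorder(n->left);
	free_postorder(n->right);
	printf("freeing %d\n", n->key);
	free(n);
}

int main(void)
{
	struct node *l = calloc(1, sizeof(*l));
	struct node *r = calloc(1, sizeof(*r));
	struct node *root = calloc(1, sizeof(*root));

	l->key = 1;
	r->key = 3;
	root->key = 2;
	root->left = l;
	root->right = r;
	free_postorder(root);		/* prints 1, 3, 2 */
	return 0;
}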
diff --git a/net/9p/client.c b/net/9p/client.c
index ba93bda..ee8fd6b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -987,6 +987,7 @@
 {
 	int err;
 	struct p9_client *clnt;
+	char *client_id;
 
 	err = 0;
 	clnt = kmalloc(sizeof(struct p9_client), GFP_KERNEL);
@@ -995,6 +996,10 @@
 
 	clnt->trans_mod = NULL;
 	clnt->trans = NULL;
+
+	client_id = utsname()->nodename;
+	memcpy(clnt->name, client_id, strlen(client_id) + 1);
+
 	spin_lock_init(&clnt->lock);
 	INIT_LIST_HEAD(&clnt->fidlist);
 
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e1c26b1..990afab 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -577,6 +577,10 @@
 	mutex_lock(&virtio_9p_lock);
 	list_add_tail(&chan->chan_list, &virtio_chan_list);
 	mutex_unlock(&virtio_9p_lock);
+
+	/* Let udev rules use the new mount_tag attribute. */
+	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
+
 	return 0;
 
 out_free_tag:
@@ -654,6 +658,7 @@
 	list_del(&chan->chan_list);
 	mutex_unlock(&virtio_9p_lock);
 	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
+	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
 	kfree(chan->tag);
 	kfree(chan->vc_wq);
 	kfree(chan);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index b9259ef..e74ddc1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -207,7 +207,7 @@
 	       struct net_device *dev, u32 filter_mask)
 {
 	int err = 0;
-	struct net_bridge_port *port = br_port_get_rcu(dev);
+	struct net_bridge_port *port = br_port_get_rtnl(dev);
 
 	/* not a bridge port and  */
 	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
@@ -451,7 +451,7 @@
 	struct net_port_vlans *pv;
 
 	if (br_port_exists(dev))
-		pv = nbp_get_vlan_info(br_port_get_rcu(dev));
+		pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
 	else if (dev->priv_flags & IFF_EBRIDGE)
 		pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
 	else
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 598cb0b..efb57d9 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -202,13 +202,10 @@
 
 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
 {
-	struct net_bridge_port *port =
-			rcu_dereference_rtnl(dev->rx_handler_data);
-
-	return br_port_exists(dev) ? port : NULL;
+	return rcu_dereference(dev->rx_handler_data);
 }
 
-static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
+static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev)
 {
 	return br_port_exists(dev) ?
 		rtnl_dereference(dev->rx_handler_data) : NULL;
@@ -746,6 +743,7 @@
 extern void br_init_port(struct net_bridge_port *p);
 extern void br_become_designated_port(struct net_bridge_port *p);
 
+extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
 extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
 extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
 extern int br_set_max_age(struct net_bridge *br, unsigned long x);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 1c0a50f..3c86f05 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,7 +209,7 @@
 	p->designated_age = jiffies - bpdu->message_age;
 
 	mod_timer(&p->message_age_timer, jiffies
-		  + (p->br->max_age - bpdu->message_age));
+		  + (bpdu->max_age - bpdu->message_age));
 }
 
 /* called under bridge lock */
@@ -544,18 +544,27 @@
 
 }
 
-int br_set_forward_delay(struct net_bridge *br, unsigned long val)
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
 {
-	unsigned long t = clock_t_to_jiffies(val);
-
-	if (br->stp_enabled != BR_NO_STP &&
-	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
-		return -ERANGE;
-
-	spin_lock_bh(&br->lock);
 	br->bridge_forward_delay = t;
 	if (br_is_root_bridge(br))
 		br->forward_delay = br->bridge_forward_delay;
+}
+
+int br_set_forward_delay(struct net_bridge *br, unsigned long val)
+{
+	unsigned long t = clock_t_to_jiffies(val);
+	int err = -ERANGE;
+
+	spin_lock_bh(&br->lock);
+	if (br->stp_enabled != BR_NO_STP &&
+	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
+		goto unlock;
+
+	__br_set_forward_delay(br, t);
+	err = 0;
+
+unlock:
 	spin_unlock_bh(&br->lock);
-	return 0;
+	return err;
 }
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d45e760..108084a 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -129,6 +129,14 @@
 	char *envp[] = { NULL };
 
 	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+
+	spin_lock_bh(&br->lock);
+
+	if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
+		__br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
+	else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
+		__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
+
 	if (r == 0) {
 		br->stp_enabled = BR_USER_STP;
 		br_debug(br, "userspace STP started\n");
@@ -137,10 +145,10 @@
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
-		spin_lock_bh(&br->lock);
 		br_port_state_selection(br);
-		spin_unlock_bh(&br->lock);
 	}
+
+	spin_unlock_bh(&br->lock);
 }
 
 static void br_stp_stop(struct net_bridge *br)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0ff42f0..1929af8 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -352,7 +352,7 @@
 
 		if (queue_index != new_index && sk &&
 		    rcu_access_pointer(sk->sk_dst_cache))
-			sk_tx_queue_set(sk, queue_index);
+			sk_tx_queue_set(sk, new_index);
 
 		queue_index = new_index;
 	}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2c637e9..c3c7b27 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -550,7 +550,7 @@
 		return;
 
 	proto = ntohs(eth_hdr(skb)->h_proto);
-	if (proto == ETH_P_IP) {
+	if (proto == ETH_P_ARP) {
 		struct arphdr *arp;
 		unsigned char *arp_ptr;
 		/* No arp on this interface */
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 4a22f3e..52f3c6b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -502,7 +502,9 @@
 	 * ACKs, wait for troubles.
 	 */
 	if (crtt > tp->srtt) {
-		inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk));
+		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
+		crtt >>= 3;
+		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
 	} else if (tp->srtt == 0) {
 		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 		 * 3WHS. This is most likely due to retransmission,
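
The computation above mirrors tcp_rtt_estimator(): srtt is kept left-shifted
by 3 bits and the cached RTT uses the same fixed-point format, so the patch
first drops the scaling and then bootstraps the variance term as in RFC 6298
(rttvar = srtt/2, hence rto = srtt + 4*rttvar = 3*srtt), floored by the
minimum RTO. A worked example; the 100 ms sample and the 200 ms floor are
assumed values, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int crtt = 800;	/* cached RTT: 100 ms stored <<3, like srtt */
	unsigned int rto_min = 200;	/* assumed 200 ms RTO floor */
	unsigned int rto;

	crtt >>= 3;			/* drop the fixed-point scaling: 100 ms */
	rto = crtt + (2 * crtt > rto_min ? 2 * crtt : rto_min);
	printf("rto = %u ms\n", rto);	/* 300 ms, i.e. 3 * RTT */
	return 0;
}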
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 136fe55..7c96100 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -915,6 +915,9 @@
 	err = ip6_route_init();
 	if (err)
 		goto ip6_route_fail;
+	err = ndisc_late_init();
+	if (err)
+		goto ndisc_late_fail;
 	err = ip6_flowlabel_init();
 	if (err)
 		goto ip6_flowlabel_fail;
@@ -981,6 +984,8 @@
 addrconf_fail:
 	ip6_flowlabel_cleanup();
 ip6_flowlabel_fail:
+	ndisc_late_cleanup();
+ndisc_late_fail:
 	ip6_route_cleanup();
 ip6_route_fail:
 #ifdef CONFIG_PROC_FS
@@ -1043,6 +1048,7 @@
 	ipv6_exthdrs_exit();
 	addrconf_cleanup();
 	ip6_flowlabel_cleanup();
+	ndisc_late_cleanup();
 	ip6_route_cleanup();
 #ifdef CONFIG_PROC_FS
 
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 07a7d65..8d67900 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -162,12 +162,6 @@
 		off += optlen;
 		len -= optlen;
 	}
-	/* This case will not be caught by above check since its padding
-	 * length is smaller than 7:
-	 * 1 byte NH + 1 byte Length + 6 bytes Padding
-	 */
-	if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
-		goto bad;
 
 	if (len == 0)
 		return true;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index a6c58ce..e275916 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -138,8 +138,8 @@
 	return false;
 
 suppress_route:
-		ip6_rt_put(rt);
-		return true;
+	ip6_rt_put(rt);
+	return true;
 }
 
 static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 73db48e..5bec666 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -825,9 +825,9 @@
 	fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
 			offsetof(struct rt6_info, rt6i_dst), allow_create,
 			replace_required);
-
 	if (IS_ERR(fn)) {
 		err = PTR_ERR(fn);
+		fn = NULL;
 		goto out;
 	}
 
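
fib6_add_1() returns an ERR_PTR on failure, and the shared out: path evidently expects fn to be either NULL or a valid node; clearing fn after extracting the errno keeps the encoded error value from being used as a pointer later. A userspace sketch of the ERR_PTR idiom (the stand-in macros mirror include/linux/err.h):

	#include <stdio.h>

	#define MAX_ERRNO 4095
	#define ERR_PTR(err)  ((void *)(long)(err))
	#define PTR_ERR(ptr)  ((long)(ptr))
	#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	struct node { int val; };

	static void node_release(struct node *n)
	{
		if (n)	/* safe only because callers NULL out ERR_PTRs */
			printf("releasing node %d\n", n->val);
	}

	int main(void)
	{
		struct node *fn = ERR_PTR(-12);	/* -ENOMEM */
		long err = 0;

		if (IS_ERR(fn)) {
			err = PTR_ERR(fn);
			fn = NULL;	/* the fix: drop the encoded error */
		}
		node_release(fn);	/* would misfire on an ERR_PTR otherwise */
		printf("err = %ld\n", err);
		return 0;
	}
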
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 61355f7..2d8f482 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1656,9 +1656,9 @@
 
 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
 	    nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
-		    &parm->raddr) ||
-	    nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
 		    &parm->laddr) ||
+	    nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
+		    &parm->raddr) ||
 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 1217945..f8a55ff 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1727,24 +1727,28 @@
 	if (err)
 		goto out_unregister_pernet;
 #endif
-	err = register_netdevice_notifier(&ndisc_netdev_notifier);
-	if (err)
-		goto out_unregister_sysctl;
 out:
 	return err;
 
-out_unregister_sysctl:
 #ifdef CONFIG_SYSCTL
-	neigh_sysctl_unregister(&nd_tbl.parms);
 out_unregister_pernet:
-#endif
 	unregister_pernet_subsys(&ndisc_net_ops);
 	goto out;
+#endif
+}
+
+int __init ndisc_late_init(void)
+{
+	return register_netdevice_notifier(&ndisc_netdev_notifier);
+}
+
+void ndisc_late_cleanup(void)
+{
+	unregister_netdevice_notifier(&ndisc_netdev_notifier);
 }
 
 void ndisc_cleanup(void)
 {
-	unregister_netdevice_notifier(&ndisc_netdev_notifier);
 #ifdef CONFIG_SYSCTL
 	neigh_sysctl_unregister(&nd_tbl.parms);
 #endif
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index fb36f85..410db90 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1178,6 +1178,7 @@
 		if (type > OVS_KEY_ATTR_MAX) {
 			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
 				  type, OVS_KEY_ATTR_MAX);
+			return -EINVAL;
 		}
 
 		if (attrs & (1 << type)) {
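
Before this fix the unknown-attribute case only logged and fell through, so an out-of-range type went on to be used as a shift count (and later as an index). A small sketch of the validate-then-use pattern (constants are illustrative):

	#include <stdio.h>

	#define KEY_ATTR_MAX 7

	static int parse_attr(int type, unsigned int *attrs)
	{
		if (type > KEY_ATTR_MAX) {
			fprintf(stderr, "unknown attribute %d\n", type);
			return -1;	/* the fix: bail out before using `type` */
		}
		if (*attrs & (1u << type))	/* undefined shift without the check */
			return -1;		/* duplicate attribute */
		*attrs |= 1u << type;
		return 0;
	}

	int main(void)
	{
		unsigned int attrs = 0;

		printf("%d\n", parse_attr(3, &attrs));	/* 0 */
		printf("%d\n", parse_attr(99, &attrs));	/* -1, no UB */
		return 0;
	}
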
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c2178b1..863846c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1495,7 +1495,7 @@
 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
 
 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
-	cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
+	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
 
 	sch_tree_unlock(sch);
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5f20686..98b69bb 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -634,8 +634,7 @@
 		break;
 	case ICMP_REDIRECT:
 		sctp_icmp_redirect(sk, transport, skb);
-		err = 0;
-		break;
+		/* Fall through to out_unlock. */
 	default:
 		goto out_unlock;
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index da613ce..e7b2d4f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -183,7 +183,7 @@
 		break;
 	case NDISC_REDIRECT:
 		sctp_icmp_redirect(sk, transport, skb);
-		break;
+		goto out_unlock;
 	default:
 		break;
 	}
@@ -204,44 +204,23 @@
 		in6_dev_put(idev);
 }
 
-/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
 static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 {
 	struct sock *sk = skb->sk;
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flowi6 fl6;
-
-	memset(&fl6, 0, sizeof(fl6));
-
-	fl6.flowi6_proto = sk->sk_protocol;
-
-	/* Fill in the dest address from the route entry passed with the skb
-	 * and the source address from the transport.
-	 */
-	fl6.daddr = transport->ipaddr.v6.sin6_addr;
-	fl6.saddr = transport->saddr.v6.sin6_addr;
-
-	fl6.flowlabel = np->flow_label;
-	IP6_ECN_flow_xmit(sk, fl6.flowlabel);
-	if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
-		fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
-	else
-		fl6.flowi6_oif = sk->sk_bound_dev_if;
-
-	if (np->opt && np->opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-		fl6.daddr = *rt0->addr;
-	}
+	struct flowi6 *fl6 = &transport->fl.u.ip6;
 
 	pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
-		 skb->len, &fl6.saddr, &fl6.daddr);
+		 skb->len, &fl6->saddr, &fl6->daddr);
 
-	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
 
 	if (!(transport->param_flags & SPP_PMTUD_ENABLE))
 		skb->local_df = 1;
 
-	return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+
+	return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 }
 
 /* Returns the dst cache entry for the given source and destination ip
@@ -254,10 +233,12 @@
 	struct dst_entry *dst = NULL;
 	struct flowi6 *fl6 = &fl->u.ip6;
 	struct sctp_bind_addr *bp;
+	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
 	union sctp_addr *baddr = NULL;
 	union sctp_addr *daddr = &t->ipaddr;
 	union sctp_addr dst_saddr;
+	struct in6_addr *final_p, final;
 	__u8 matchlen = 0;
 	__u8 bmatchlen;
 	sctp_scope_t scope;
@@ -281,7 +262,8 @@
 		pr_debug("src=%pI6 - ", &fl6->saddr);
 	}
 
-	dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
+	final_p = fl6_update_dst(fl6, np->opt, &final);
+	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
 	if (!asoc || saddr)
 		goto out;
 
@@ -333,10 +315,12 @@
 		}
 	}
 	rcu_read_unlock();
+
 	if (baddr) {
 		fl6->saddr = baddr->v6.sin6_addr;
 		fl6->fl6_sport = baddr->v6.sin6_port;
-		dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
+		final_p = fl6_update_dst(fl6, np->opt, &final);
+		dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
 	}
 
 out:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d5d5882..911b71b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -806,6 +806,9 @@
 			goto skip_mkasconf;
 		}
 
+		if (laddr == NULL)
+			return -EINVAL;
+
 		/* We do not need RCU protection throughout this loop
 		 * because this is done under a socket lock from the
 		 * setsockopt call.
@@ -6176,7 +6179,7 @@
 	/* Is there any exceptional events?  */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR |
-			sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0;
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
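
The added parentheses matter because '|' binds tighter than '?:' in C, so the unparenthesized expression used the whole OR as the condition and never put POLLERR itself into the mask. A two-line demonstration:

	#include <stdio.h>

	#define POLLERR 0x008
	#define POLLPRI 0x002

	int main(void)
	{
		int flag = 0;	/* SOCK_SELECT_ERR_QUEUE not set */

		/* parses as (POLLERR | flag) ? POLLPRI : 0, losing POLLERR */
		int buggy = POLLERR | flag ? POLLPRI : 0;
		/* as intended: POLLERR always, POLLPRI only if flag is set */
		int fixed = POLLERR | (flag ? POLLPRI : 0);

		printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);	/* 0x2 vs 0x8 */
		return 0;
	}
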
diff --git a/net/socket.c b/net/socket.c
index b2d7c62..0ceaa5c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3072,12 +3072,12 @@
 
 	uifmap32 = &uifr32->ifr_ifru.ifru_map;
 	err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
-	err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
-	err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
-	err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
-	err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
-	err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
-	err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
+	err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+	err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+	err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+	err |= get_user(ifr.ifr_map.irq, &uifmap32->irq);
+	err |= get_user(ifr.ifr_map.dma, &uifmap32->dma);
+	err |= get_user(ifr.ifr_map.port, &uifmap32->port);
 	if (err)
 		return -EFAULT;
 
@@ -3088,12 +3088,12 @@
 
 	if (cmd == SIOCGIFMAP && !err) {
 		err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
-		err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
-		err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
-		err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
-		err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
-		err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
-		err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
+		err |= put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+		err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+		err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+		err |= put_user(ifr.ifr_map.irq, &uifmap32->irq);
+		err |= put_user(ifr.ifr_map.dma, &uifmap32->dma);
+		err |= put_user(ifr.ifr_map.port, &uifmap32->port);
 		if (err)
 			err = -EFAULT;
 	}
@@ -3167,25 +3167,25 @@
 		struct in6_rtmsg32 __user *ur6 = argp;
 		ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
 			3 * sizeof(struct in6_addr));
-		ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
-		ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
-		ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
-		ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
-		ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
-		ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
-		ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
+		ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
+		ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
+		ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
+		ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
+		ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
+		ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
+		ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
 
 		r = (void *) &r6;
 	} else { /* ipv4 */
 		struct rtentry32 __user *ur4 = argp;
 		ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
 					3 * sizeof(struct sockaddr));
-		ret |= __get_user(r4.rt_flags, &(ur4->rt_flags));
-		ret |= __get_user(r4.rt_metric, &(ur4->rt_metric));
-		ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu));
-		ret |= __get_user(r4.rt_window, &(ur4->rt_window));
-		ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt));
-		ret |= __get_user(rtdev, &(ur4->rt_dev));
+		ret |= get_user(r4.rt_flags, &(ur4->rt_flags));
+		ret |= get_user(r4.rt_metric, &(ur4->rt_metric));
+		ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu));
+		ret |= get_user(r4.rt_window, &(ur4->rt_window));
+		ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt));
+		ret |= get_user(rtdev, &(ur4->rt_dev));
 		if (rtdev) {
 			ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
 			r4.rt_dev = (char __user __force *)devname;
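
get_user()/put_user() perform the access_ok() range check that the double-underscore variants skip, and these compat ioctl handlers take raw user pointers with no prior access_ok() of their own. A loose userspace analogue of a checked accessor (the sandbox array stands in for the validated user address range; this is a sketch, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static char sandbox[16];

	static int checked_get(char *dst, const char *src, size_t n)
	{
		uintptr_t lo = (uintptr_t)sandbox, hi = lo + sizeof(sandbox);
		uintptr_t p = (uintptr_t)src;

		if (p < lo || p + n > hi)
			return -1;	/* get_user(): validate first, then copy */
		memcpy(dst, src, n);	/* __get_user(): only this part */
		return 0;
	}

	int main(void)
	{
		char v;

		strcpy(sandbox, "ok");
		printf("%d\n", checked_get(&v, sandbox, 1));		/* 0 */
		printf("%d\n", checked_get(&v, sandbox + 32, 1));	/* -1 */
		return 0;
	}
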
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index af7ffd4..f1eb0d1 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -213,6 +213,26 @@
 	return status;
 }
 
+static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+	int i;
+
+	for (i = 0; i < arg->npages && arg->pages[i]; i++)
+		__free_page(arg->pages[i]);
+}
+
+static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+	arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
+	arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
+	/*
+	 * XXX: actual pages are allocated by xdr layer in
+	 * xdr_partial_copy_from_skb.
+	 */
+	if (!arg->pages)
+		return -ENOMEM;
+	return 0;
+}
 
 /*
  * Public functions
@@ -261,10 +281,16 @@
 		arg.context_handle = &ctxh;
 	res.output_token->len = GSSX_max_output_token_sz;
 
+	ret = gssp_alloc_receive_pages(&arg);
+	if (ret)
+		return ret;
+
 	/* use nfs/ for targ_name ? */
 
 	ret = gssp_call(net, &msg);
 
+	gssp_free_receive_pages(&arg);
+
 	/* we need to fetch all data even in case of error so
 	 * that we can free special structures if they have been allocated */
 	data->major_status = res.status.major_status;
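
The receive buffer must be able to hold a grouplist of up to NGROUPS_MAX 32-bit gids, hence the page-count arithmetic in gssp_alloc_receive_pages(). A standalone check of the numbers, assuming the common PAGE_SIZE and the kernel's NGROUPS_MAX:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
	#define PAGE_SIZE   4096
	#define NGROUPS_MAX 65536	/* supplementary-group limit */

	int main(void)
	{
		/* one u32 per gid: how many pages can the grouplist need? */
		unsigned int npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);

		printf("npages = %u\n", npages);	/* 65536*4/4096 = 64 */
		return 0;
	}
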
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 3c85d1c..f0f78c5 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -166,14 +166,15 @@
 	return 0;
 }
 
-static int get_s32(void **p, void *max, s32 *res)
+static int get_host_u32(struct xdr_stream *xdr, u32 *res)
 {
-	void *base = *p;
-	void *next = (void *)((char *)base + sizeof(s32));
-	if (unlikely(next > max || next < base))
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (!p)
 		return -EINVAL;
-	memcpy(res, base, sizeof(s32));
-	*p = next;
+	/* Contents of linux creds are all host-endian: */
+	memcpy(res, p, sizeof(u32));
 	return 0;
 }
 
@@ -182,9 +183,9 @@
 {
 	u32 length;
 	__be32 *p;
-	void *q, *end;
-	s32 tmp;
-	int N, i, err;
+	u32 tmp;
+	u32 N;
+	int i, err;
 
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
@@ -192,33 +193,28 @@
 
 	length = be32_to_cpup(p);
 
-	/* FIXME: we do not want to use the scratch buffer for this one
-	 * may need to use functions that allows us to access an io vector
-	 * directly */
-	p = xdr_inline_decode(xdr, length);
-	if (unlikely(p == NULL))
+	if (length > (3 + NGROUPS_MAX) * sizeof(u32))
 		return -ENOSPC;
 
-	q = p;
-	end = q + length;
-
 	/* uid */
-	err = get_s32(&q, end, &tmp);
+	err = get_host_u32(xdr, &tmp);
 	if (err)
 		return err;
 	creds->cr_uid = make_kuid(&init_user_ns, tmp);
 
 	/* gid */
-	err = get_s32(&q, end, &tmp);
+	err = get_host_u32(xdr, &tmp);
 	if (err)
 		return err;
 	creds->cr_gid = make_kgid(&init_user_ns, tmp);
 
 	/* number of additional gid's */
-	err = get_s32(&q, end, &tmp);
+	err = get_host_u32(xdr, &tmp);
 	if (err)
 		return err;
 	N = tmp;
+	if ((3 + N) * sizeof(u32) != length)
+		return -EINVAL;
 	creds->cr_group_info = groups_alloc(N);
 	if (creds->cr_group_info == NULL)
 		return -ENOMEM;
@@ -226,7 +222,7 @@
 	/* gid's */
 	for (i = 0; i < N; i++) {
 		kgid_t kgid;
-		err = get_s32(&q, end, &tmp);
+		err = get_host_u32(xdr, &tmp);
 		if (err)
 			goto out_free_groups;
 		err = -EINVAL;
@@ -784,6 +780,9 @@
 	/* arg->options */
 	err = dummy_enc_opt_array(xdr, &arg->options);
 
+	xdr_inline_pages(&req->rq_rcv_buf,
+		PAGE_SIZE/2 /* pretty arbitrary */,
+		arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
 done:
 	if (err)
 		dprintk("RPC:       gssx_enc_accept_sec_context: %d\n", err);
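
The rewritten decoder bounds the creds blob twice: the advertised length may not exceed the three fixed words (uid, gid, gid count) plus NGROUPS_MAX gids, and once the gid count N is read, (3 + N) words must match the advertised length exactly. A standalone sketch of both checks:

	#include <stdint.h>
	#include <stdio.h>

	#define NGROUPS_MAX 65536

	/* On-the-wire layout: uid, gid, gid-count, then N gids. */
	static int creds_len_ok(uint32_t length, uint32_t n_gids)
	{
		if (length > (3 + NGROUPS_MAX) * sizeof(uint32_t))
			return 0;	/* implausibly large, reject early */
		return (3 + n_gids) * sizeof(uint32_t) == length;
	}

	int main(void)
	{
		printf("%d\n", creds_len_ok(20, 2));	/* 1: (3+2)*4 == 20 */
		printf("%d\n", creds_len_ok(20, 5));	/* 0: count disagrees */
		return 0;
	}
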
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h
index 1c98b27..685a688 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
@@ -147,6 +147,8 @@
 	struct gssx_cb *input_cb;
 	u32 ret_deleg_cred;
 	struct gssx_option_array options;
+	struct page **pages;
+	unsigned int npages;
 };
 
 struct gssx_res_accept_sec_context {
@@ -240,7 +242,8 @@
 			     2 * GSSX_max_princ_sz + \
 			     8 + 8 + 4 + 4 + 4)
 #define GSSX_max_output_token_sz 1024
-#define GSSX_max_creds_sz (4 + 4 + 4 + NGROUPS_MAX * 4)
+/* grouplist not included; we allocate separate pages for that: */
+#define GSSX_max_creds_sz (4 + 4 + 4 /* + NGROUPS_MAX*4 */)
 #define GSSX_RES_accept_sec_context_sz (GSSX_default_status_sz + \
 					GSSX_default_ctx_sz + \
 					GSSX_max_output_token_sz + \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 2ee9eb7..47016c3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -31,12 +31,16 @@
 my $fix = 0;
 my $root;
 my %debug;
-my %ignore_type = ();
 my %camelcase = ();
+my %use_type = ();
+my @use = ();
+my %ignore_type = ();
 my @ignore = ();
 my $help = 0;
 my $configuration_file = ".checkpatch.conf";
 my $max_line_length = 80;
+my $ignore_perl_version = 0;
+my $minimum_perl_version = 5.10.0;
 
 sub help {
 	my ($exitcode) = @_;
@@ -54,6 +58,7 @@
   --terse                    one line per report
   -f, --file                 treat FILE as regular source file
   --subjective, --strict     enable more subjective tests
+  --types TYPE(,TYPE2...)    show only these comma separated message types
   --ignore TYPE(,TYPE2...)   ignore various comma separated message types
   --max-line-length=n        set the maximum line length, if exceeded, warn
   --show-types               show the message "types" in the output
@@ -71,6 +76,8 @@
                              "<inputfile>.EXPERIMENTAL-checkpatch-fixes"
                              with potential errors corrected to the preferred
                              checkpatch style
+  --ignore-perl-version      override checking of perl version; expect
+                             runtime errors.
   -h, --help, --version      display this help and exit
 
 When FILE is - read standard input.
@@ -116,6 +123,7 @@
 	'subjective!'	=> \$check,
 	'strict!'	=> \$check,
 	'ignore=s'	=> \@ignore,
+	'types=s'	=> \@use,
 	'show-types!'	=> \$show_types,
 	'max-line-length=i' => \$max_line_length,
 	'root=s'	=> \$root,
@@ -123,6 +131,7 @@
 	'mailback!'	=> \$mailback,
 	'summary-file!'	=> \$summary_file,
 	'fix!'		=> \$fix,
+	'ignore-perl-version!' => \$ignore_perl_version,
 	'debug=s'	=> \%debug,
 	'test-only=s'	=> \$tst_only,
 	'h|help'	=> \$help,
@@ -133,24 +142,50 @@
 
 my $exit = 0;
 
+if ($^V && $^V lt $minimum_perl_version) {
+	printf "$P: requires at least perl version %vd\n", $minimum_perl_version;
+	if (!$ignore_perl_version) {
+		exit(1);
+	}
+}
+
 if ($#ARGV < 0) {
 	print "$P: no input files\n";
 	exit(1);
 }
 
-@ignore = split(/,/, join(',',@ignore));
-foreach my $word (@ignore) {
-	$word =~ s/\s*\n?$//g;
-	$word =~ s/^\s*//g;
-	$word =~ s/\s+/ /g;
-	$word =~ tr/[a-z]/[A-Z]/;
+sub hash_save_array_words {
+	my ($hashRef, $arrayRef) = @_;
 
-	next if ($word =~ m/^\s*#/);
-	next if ($word =~ m/^\s*$/);
+	my @array = split(/,/, join(',', @$arrayRef));
+	foreach my $word (@array) {
+		$word =~ s/\s*\n?$//g;
+		$word =~ s/^\s*//g;
+		$word =~ s/\s+/ /g;
+		$word =~ tr/[a-z]/[A-Z]/;
 
-	$ignore_type{$word}++;
+		next if ($word =~ m/^\s*#/);
+		next if ($word =~ m/^\s*$/);
+
+		$hashRef->{$word}++;
+	}
 }
 
+sub hash_show_words {
+	my ($hashRef, $prefix) = @_;
+
+	if ($quiet == 0 && keys %$hashRef) {
+		print "NOTE: $prefix message types:";
+		foreach my $word (sort keys %$hashRef) {
+			print " $word";
+		}
+		print "\n\n";
+	}
+}
+
+hash_save_array_words(\%ignore_type, \@ignore);
+hash_save_array_words(\%use_type, \@use);
+
 my $dbg_values = 0;
 my $dbg_possible = 0;
 my $dbg_type = 0;
@@ -207,6 +242,8 @@
 			__rcu
 		}x;
 
+our $InitAttribute = qr{__(?:mem|cpu|dev|net_|)(?:initdata|initconst|init\b)};
+
 # Notes to $Attribute:
 # We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
 our $Attribute	= qr{
@@ -227,7 +264,7 @@
 			__deprecated|
 			__read_mostly|
 			__kprobes|
-			__(?:mem|cpu|dev|)(?:initdata|initconst|init\b)|
+			$InitAttribute|
 			____cacheline_aligned|
 			____cacheline_aligned_in_smp|
 			____cacheline_internodealigned_in_smp|
@@ -257,6 +294,7 @@
 		  }x;
 
 our $NonptrType;
+our $NonptrTypeWithAttr;
 our $Type;
 our $Declare;
 
@@ -319,6 +357,12 @@
 	qr{${Ident}_handler},
 	qr{${Ident}_handler_fn},
 );
+our @typeListWithAttr = (
+	@typeList,
+	qr{struct\s+$InitAttribute\s+$Ident},
+	qr{union\s+$InitAttribute\s+$Ident},
+);
+
 our @modifierList = (
 	qr{fastcall},
 );
@@ -332,6 +376,7 @@
 sub build_types {
 	my $mods = "(?x:  \n" . join("|\n  ", @modifierList) . "\n)";
 	my $all = "(?x:  \n" . join("|\n  ", @typeList) . "\n)";
+	my $allWithAttr = "(?x:  \n" . join("|\n  ", @typeListWithAttr) . "\n)";
 	$Modifier	= qr{(?:$Attribute|$Sparse|$mods)};
 	$NonptrType	= qr{
 			(?:$Modifier\s+|const\s+)*
@@ -342,6 +387,15 @@
 			)
 			(?:\s+$Modifier|\s+const)*
 		  }x;
+	$NonptrTypeWithAttr	= qr{
+			(?:$Modifier\s+|const\s+)*
+			(?:
+				(?:typeof|__typeof__)\s*\([^\)]*\)|
+				(?:$typeTypedefs\b)|
+				(?:${allWithAttr}\b)
+			)
+			(?:\s+$Modifier|\s+const)*
+		  }x;
 	$Type	= qr{
 			$NonptrType
 			(?:(?:\s|\*|\[\])+\s*const|(?:\s|\*|\[\])+|(?:\s*\[\s*\])+)?
@@ -1355,7 +1409,9 @@
 my $prefix = '';
 
 sub show_type {
-       return !defined $ignore_type{$_[0]};
+	return defined $use_type{$_[0]} if (scalar keys %use_type > 0);
+
+	return !defined $ignore_type{$_[0]};
 }
 
 sub report {
@@ -1435,7 +1491,23 @@
 sub trim {
 	my ($string) = @_;
 
-	$string =~ s/(^\s+|\s+$)//g;
+	$string =~ s/^\s+|\s+$//g;
+
+	return $string;
+}
+
+sub ltrim {
+	my ($string) = @_;
+
+	$string =~ s/^\s+//;
+
+	return $string;
+}
+
+sub rtrim {
+	my ($string) = @_;
+
+	$string =~ s/\s+$//;
 
 	return $string;
 }
@@ -1532,6 +1604,7 @@
 	my %suppress_export;
 	my $suppress_statement = 0;
 
+	my %signatures = ();
 
 	# Pre-scan the patch sanitizing the lines.
 	# Pre-scan the patch looking for any __setup documentation.
@@ -1624,6 +1697,8 @@
 	$linenr = 0;
 	foreach my $line (@lines) {
 		$linenr++;
+		my $sline = $line;	#copy of $line
+		$sline =~ s/$;/ /g;	#with comments as spaces
 
 		my $rawline = $rawlines[$linenr - 1];
 
@@ -1781,6 +1856,17 @@
 					     "email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
 				}
 			}
+
+# Check for duplicate signatures
+			my $sig_nospace = $line;
+			$sig_nospace =~ s/\s//g;
+			$sig_nospace = lc($sig_nospace);
+			if (defined $signatures{$sig_nospace}) {
+				WARN("BAD_SIGN_OFF",
+				     "Duplicate signature\n" . $herecurr);
+			} else {
+				$signatures{$sig_nospace} = 1;
+			}
 		}
 
 # Check for wrappage within a valid hunk of the file
@@ -1845,15 +1931,17 @@
 #trailing whitespace
 		if ($line =~ /^\+.*\015/) {
 			my $herevet = "$here\n" . cat_vet($rawline) . "\n";
-			ERROR("DOS_LINE_ENDINGS",
-			      "DOS line endings\n" . $herevet);
-
+			if (ERROR("DOS_LINE_ENDINGS",
+				  "DOS line endings\n" . $herevet) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/[\s\015]+$//;
+			}
 		} elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
 			my $herevet = "$here\n" . cat_vet($rawline) . "\n";
 			if (ERROR("TRAILING_WHITESPACE",
 				  "trailing whitespace\n" . $herevet) &&
 			    $fix) {
-				$fixed[$linenr - 1] =~ s/^(\+.*?)\s+$/$1/;
+				$fixed[$linenr - 1] =~ s/\s+$//;
 			}
 
 			$rpt_cleaners = 1;
@@ -2060,6 +2148,7 @@
 		if ($realfile =~ m@^(drivers/net/|net/)@ &&
 		    $prevrawline =~ /^\+[ \t]*\/\*/ &&		#starting /*
 		    $prevrawline !~ /\*\/[ \t]*$/ &&		#no trailing */
+		    $rawline =~ /^\+/ &&			#line is new
 		    $rawline !~ /^\+[ \t]*\*/) {		#no leading *
 			WARN("NETWORKING_BLOCK_COMMENT_STYLE",
 			     "networking block comments start with * on subsequent lines\n" . $hereprev);
@@ -2126,7 +2215,7 @@
 		    $realline_next);
 #print "LINE<$line>\n";
 		if ($linenr >= $suppress_statement &&
-		    $realcnt && $line =~ /.\s*\S/) {
+		    $realcnt && $sline =~ /.\s*\S/) {
 			($stat, $cond, $line_nr_next, $remain_next, $off_next) =
 				ctx_statement_block($linenr, $realcnt, 0);
 			$stat =~ s/\n./\n /g;
@@ -2486,16 +2575,22 @@
 		}
 
 # check for global initialisers.
-		if ($line =~ /^.$Type\s*$Ident\s*(?:\s+$Modifier)*\s*=\s*(0|NULL|false)\s*;/) {
-			ERROR("GLOBAL_INITIALISERS",
-			      "do not initialise globals to 0 or NULL\n" .
-				$herecurr);
+		if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+			if (ERROR("GLOBAL_INITIALISERS",
+				  "do not initialise globals to 0 or NULL\n" .
+				      $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+			}
 		}
 # check for static initialisers.
-		if ($line =~ /\bstatic\s.*=\s*(0|NULL|false)\s*;/) {
-			ERROR("INITIALISED_STATIC",
-			      "do not initialise statics to 0 or NULL\n" .
-				$herecurr);
+		if ($line =~ /^\+.*\bstatic\s.*=\s*(0|NULL|false)\s*;/) {
+			if (ERROR("INITIALISED_STATIC",
+				  "do not initialise statics to 0 or NULL\n" .
+				      $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/(\bstatic\s.*?)\s*=\s*(0|NULL|false)\s*;/$1;/;
+			}
 		}
 
 # check for static const char * arrays.
@@ -2638,8 +2733,12 @@
 		}
 
 		if ($line =~ /\bpr_warning\s*\(/) {
-			WARN("PREFER_PR_LEVEL",
-			     "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
+			if (WARN("PREFER_PR_LEVEL",
+				 "Prefer pr_warn(... to pr_warning(...\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~
+				    s/\bpr_warning\b/pr_warn/;
+			}
 		}
 
 		if ($line =~ /\bdev_printk\s*\(\s*KERN_([A-Z]+)/) {
@@ -2759,6 +2858,7 @@
 			$off = 0;
 
 			my $blank = copy_spacing($opline);
+			my $last_after = -1;
 
 			for (my $n = 0; $n < $#elements; $n += 2) {
 
@@ -2824,7 +2924,7 @@
 					    $cc !~ /^\\/ && $cc !~ /^;/) {
 						if (ERROR("SPACING",
 							  "space required after that '$op' $at\n" . $hereptr)) {
-							$good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+							$good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
 							$line_fixed = 1;
 						}
 					}
@@ -2839,11 +2939,11 @@
 					if ($ctx =~ /Wx.|.xW/) {
 						if (ERROR("SPACING",
 							  "spaces prohibited around that '$op' $at\n" . $hereptr)) {
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
-							$line_fixed = 1;
+							$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
 							if (defined $fix_elements[$n + 2]) {
 								$fix_elements[$n + 2] =~ s/^\s+//;
 							}
+							$line_fixed = 1;
 						}
 					}
 
@@ -2852,8 +2952,9 @@
 					if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
 						if (ERROR("SPACING",
 							  "space required after that '$op' $at\n" . $hereptr)) {
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]) . " ";
+							$good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
 							$line_fixed = 1;
+							$last_after = $n;
 						}
 					}
 
@@ -2870,8 +2971,10 @@
 					if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
 						if (ERROR("SPACING",
 							  "space required before that '$op' $at\n" . $hereptr)) {
-							$good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]);
-							$line_fixed = 1;
+							if ($n != $last_after + 2) {
+								$good = $fix_elements[$n] . " " . ltrim($fix_elements[$n + 1]);
+								$line_fixed = 1;
+							}
 						}
 					}
 					if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
@@ -2880,12 +2983,11 @@
 					} elsif ($ctx =~ /.xW/) {
 						if (ERROR("SPACING",
 							  "space prohibited after that '$op' $at\n" . $hereptr)) {
-							$fixed_line =~ s/\s+$//;
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
-							$line_fixed = 1;
+							$good = $fix_elements[$n] . rtrim($fix_elements[$n + 1]);
 							if (defined $fix_elements[$n + 2]) {
 								$fix_elements[$n + 2] =~ s/^\s+//;
 							}
+							$line_fixed = 1;
 						}
 					}
 
@@ -2894,8 +2996,7 @@
 					if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
 						if (ERROR("SPACING",
 							  "space required one side of that '$op' $at\n" . $hereptr)) {
-							$fixed_line =~ s/\s+$//;
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]) . " ";
+							$good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
 							$line_fixed = 1;
 						}
 					}
@@ -2903,20 +3004,18 @@
 					    ($ctx =~ /Wx./ && $cc =~ /^;/)) {
 						if (ERROR("SPACING",
 							  "space prohibited before that '$op' $at\n" . $hereptr)) {
-							$fixed_line =~ s/\s+$//;
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+							$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
 							$line_fixed = 1;
 						}
 					}
 					if ($ctx =~ /ExW/) {
 						if (ERROR("SPACING",
 							  "space prohibited after that '$op' $at\n" . $hereptr)) {
-							$fixed_line =~ s/\s+$//;
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
-							$line_fixed = 1;
+							$good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
 							if (defined $fix_elements[$n + 2]) {
 								$fix_elements[$n + 2] =~ s/^\s+//;
 							}
+							$line_fixed = 1;
 						}
 					}
 
@@ -2930,8 +3029,10 @@
 					if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
 						if (ERROR("SPACING",
 							  "need consistent spacing around '$op' $at\n" . $hereptr)) {
-							$fixed_line =~ s/\s+$//;
-							$good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+							$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+							if (defined $fix_elements[$n + 2]) {
+								$fix_elements[$n + 2] =~ s/^\s+//;
+							}
 							$line_fixed = 1;
 						}
 					}
@@ -2942,7 +3043,7 @@
 					if ($ctx =~ /Wx./) {
 						if (ERROR("SPACING",
 							  "space prohibited before that '$op' $at\n" . $hereptr)) {
-							$good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+							$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
 							$line_fixed = 1;
 						}
 					}
@@ -2969,8 +3070,10 @@
 					if ($ok == 0) {
 						if (ERROR("SPACING",
 							  "spaces required around that '$op' $at\n" . $hereptr)) {
-							$good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
-							$good = $fix_elements[$n] . " " . trim($fix_elements[$n + 1]) . " ";
+							$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+							if (defined $fix_elements[$n + 2]) {
+								$fix_elements[$n + 2] =~ s/^\s+//;
+							}
 							$line_fixed = 1;
 						}
 					}
@@ -3031,8 +3134,7 @@
 			if (ERROR("SPACING",
 				  "space required before the open brace '{'\n" . $herecurr) &&
 			    $fix) {
-				$fixed[$linenr - 1] =~
-				    s/^(\+.*(?:do|\))){/$1 {/;
+				$fixed[$linenr - 1] =~ s/^(\+.*(?:do|\))){/$1 {/;
 			}
 		}
 
@@ -3047,8 +3149,12 @@
 # closing brace should have a space following it when it has anything
 # on the line
 		if ($line =~ /}(?!(?:,|;|\)))\S/) {
-			ERROR("SPACING",
-			      "space required after that close brace '}'\n" . $herecurr);
+			if (ERROR("SPACING",
+				  "space required after that close brace '}'\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~
+				    s/}((?!(?:,|;|\)))\S)/} $1/;
+			}
 		}
 
 # check spacing on square brackets
@@ -3271,8 +3377,13 @@
 
 #gcc binary extension
 			if ($var =~ /^$Binary$/) {
-				WARN("GCC_BINARY_CONSTANT",
-				     "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr);
+				if (WARN("GCC_BINARY_CONSTANT",
+					 "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr) &&
+				    $fix) {
+					my $hexval = sprintf("0x%x", oct($var));
+					$fixed[$linenr - 1] =~
+					    s/\b$var\b/$hexval/;
+				}
 			}
 
 #CamelCase
@@ -3282,19 +3393,26 @@
 			    $var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
 #Ignore SI style variants like nS, mV and dB (ie: max_uV, regulator_min_uA_show)
 			    $var !~ /^(?:[a-z_]*?)_?[a-z][A-Z](?:_[a-z_]+)?$/) {
-				seed_camelcase_includes() if ($check);
-				if (!defined $camelcase{$var}) {
-					$camelcase{$var} = 1;
-					CHK("CAMELCASE",
-					    "Avoid CamelCase: <$var>\n" . $herecurr);
+				while ($var =~ m{($Ident)}g) {
+					my $word = $1;
+					next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
+					seed_camelcase_includes() if ($check);
+					if (!defined $camelcase{$word}) {
+						$camelcase{$word} = 1;
+						CHK("CAMELCASE",
+						    "Avoid CamelCase: <$word>\n" . $herecurr);
+					}
 				}
 			}
 		}
 
 #no spaces allowed after \ in define
-		if ($line=~/\#\s*define.*\\\s$/) {
-			WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
-			     "Whitepspace after \\ makes next lines useless\n" . $herecurr);
+		if ($line =~ /\#\s*define.*\\\s+$/) {
+			if (WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
+				 "Whitespace after \\ makes next lines useless\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/\s+$//;
+			}
 		}
 
 #warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
@@ -3374,7 +3492,8 @@
 			    $dstat !~ /^for\s*$Constant$/ &&				# for (...)
 			    $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ &&	# for (...) bar()
 			    $dstat !~ /^do\s*{/ &&					# do {...
-			    $dstat !~ /^\({/)						# ({...
+			    $dstat !~ /^\({/ &&						# ({...
+			    $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
 			{
 				$ctx =~ s/\n*$//;
 				my $herectx = $here . "\n";
@@ -3606,6 +3725,32 @@
 			}
 		}
 
+sub string_find_replace {
+	my ($string, $find, $replace) = @_;
+
+	$string =~ s/$find/$replace/g;
+
+	return $string;
+}
+
+# check for bad placement of section $InitAttribute (e.g.: __initdata)
+		if ($line =~ /(\b$InitAttribute\b)/) {
+			my $attr = $1;
+			if ($line =~ /^\+\s*static\s+(?:const\s+)?(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*[=;]/) {
+				my $ptr = $1;
+				my $var = $2;
+				if ((($ptr =~ /\b(union|struct)\s+$attr\b/ &&
+				      ERROR("MISPLACED_INIT",
+					    "$attr should be placed after $var\n" . $herecurr)) ||
+				     ($ptr !~ /\b(union|struct)\s+$attr\b/ &&
+				      WARN("MISPLACED_INIT",
+					   "$attr should be placed after $var\n" . $herecurr))) &&
+				    $fix) {
+					$fixed[$linenr - 1] =~ s/(\bstatic\s+(?:const\s+)?)(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*([=;])\s*/"$1" . trim(string_find_replace($2, "\\s*$attr\\s*", " ")) . " " . trim(string_find_replace($3, "\\s*$attr\\s*", "")) . " $attr" . ("$4" eq ";" ? ";" : " = ")/e;
+				}
+			}
+		}
+
 # prefer usleep_range over udelay
 		if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) {
 			# ignore udelay's < 10, however
@@ -3691,8 +3836,12 @@
 
 # Check for __inline__ and __inline, prefer inline
 		if ($line =~ /\b(__inline__|__inline)\b/) {
-			WARN("INLINE",
-			     "plain inline is preferred over $1\n" . $herecurr);
+			if (WARN("INLINE",
+				 "plain inline is preferred over $1\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/\b(__inline__|__inline)\b/inline/;
+
+			}
 		}
 
 # Check for __attribute__ packed, prefer __packed
@@ -3709,14 +3858,21 @@
 
 # Check for __attribute__ format(printf, prefer __printf
 		if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
-			WARN("PREFER_PRINTF",
-			     "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr);
+			if (WARN("PREFER_PRINTF",
+				 "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf\s*,\s*(.*)\)\s*\)\s*\)/"__printf(" . trim($1) . ")"/ex;
+
+			}
 		}
 
 # Check for __attribute__ format(scanf, prefer __scanf
 		if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
-			WARN("PREFER_SCANF",
-			     "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr);
+			if (WARN("PREFER_SCANF",
+				 "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\s*,\s*(.*)\)\s*\)\s*\)/"__scanf(" . trim($1) . ")"/ex;
+			}
 		}
 
 # check for sizeof(&)
@@ -3727,8 +3883,11 @@
 
 # check for sizeof without parenthesis
 		if ($line =~ /\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/) {
-			WARN("SIZEOF_PARENTHESIS",
-			     "sizeof $1 should be sizeof($1)\n" . $herecurr);
+			if (WARN("SIZEOF_PARENTHESIS",
+				 "sizeof $1 should be sizeof($1)\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/"sizeof(" . trim($1) . ")"/ex;
+			}
 		}
 
 # check for line continuations in quoted strings with odd counts of "
@@ -3747,8 +3906,11 @@
 		if ($line =~ /\bseq_printf\s*\(/) {
 			my $fmt = get_quoted_string($line, $rawline);
 			if ($fmt !~ /[^\\]\%/) {
-				WARN("PREFER_SEQ_PUTS",
-				     "Prefer seq_puts to seq_printf\n" . $herecurr);
+				if (WARN("PREFER_SEQ_PUTS",
+					 "Prefer seq_puts to seq_printf\n" . $herecurr) &&
+				    $fix) {
+					$fixed[$linenr - 1] =~ s/\bseq_printf\b/seq_puts/;
+				}
 			}
 		}
 
@@ -3810,6 +3972,16 @@
 			}
 		}
 
+# check for new externs in .h files.
+		if ($realfile =~ /\.h$/ &&
+		    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
+			if (WARN("AVOID_EXTERNS",
+				 "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
+			}
+		}
+
 # check for new externs in .c files.
 		if ($realfile =~ /\.c$/ && defined $stat &&
 		    $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
@@ -3879,8 +4051,11 @@
 
 # check for multiple semicolons
 		if ($line =~ /;\s*;\s*$/) {
-			WARN("ONE_SEMICOLON",
-			     "Statements terminations use 1 semicolon\n" . $herecurr);
+			if (WARN("ONE_SEMICOLON",
+				 "Statements terminations use 1 semicolon\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/(\s*;\s*){2,}$/;/g;
+			}
 		}
 
 # check for switch/default statements without a break;
@@ -3898,9 +4073,12 @@
 		}
 
 # check for gcc specific __FUNCTION__
-		if ($line =~ /__FUNCTION__/) {
-			WARN("USE_FUNC",
-			     "__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr);
+		if ($line =~ /\b__FUNCTION__\b/) {
+			if (WARN("USE_FUNC",
+				 "__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/\b__FUNCTION__\b/__func__/g;
+			}
 		}
 
 # check for use of yield()
@@ -4105,13 +4283,8 @@
 		}
 	}
 
-	if ($quiet == 0 && keys %ignore_type) {
-	    print "NOTE: Ignored message types:";
-	    foreach my $ignore (sort keys %ignore_type) {
-		print " $ignore";
-	    }
-	    print "\n\n";
-	}
+	hash_show_words(\%use_type, "Used");
+	hash_show_words(\%ignore_type, "Ignored");
 
 	if ($clean == 0 && $fix && "@rawlines" ne "@fixed") {
 		my $newfile = $filename . ".EXPERIMENTAL-checkpatch-fixes";
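
With both --types and --ignore available, show_type() gives the allow-list precedence: if any --types were supplied, only those message types are shown; otherwise everything not in the ignore set is. A C rendering of that precedence rule (the lists are hard-coded for the demo):

	#include <stdio.h>
	#include <string.h>

	static const char *use_types[]    = { "SPACING" };	/* from --types */
	static const char *ignore_types[] = { "CAMELCASE" };	/* from --ignore */

	static int in_list(const char **list, int n, const char *t)
	{
		int i;

		for (i = 0; i < n; i++)
			if (strcmp(list[i], t) == 0)
				return 1;
		return 0;
	}

	static int show_type(const char *t)
	{
		int n_use = sizeof(use_types) / sizeof(use_types[0]);
		int n_ign = sizeof(ignore_types) / sizeof(ignore_types[0]);

		if (n_use > 0)	/* allow-list present: it alone decides */
			return in_list(use_types, n_use, t);
		return !in_list(ignore_types, n_ign, t);
	}

	int main(void)
	{
		printf("SPACING:   %d\n", show_type("SPACING"));	/* 1 */
		printf("CAMELCASE: %d\n", show_type("CAMELCASE"));	/* 0 */
		return 0;
	}
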
diff --git a/scripts/config b/scripts/config
index 567120a..2283be2 100755
--- a/scripts/config
+++ b/scripts/config
@@ -62,15 +62,52 @@
 	fi
 }
 
+txt_append() {
+	local anchor="$1"
+	local insert="$2"
+	local infile="$3"
+	local tmpfile="$infile.swp"
+
+	# sed append cmd: 'a\' + newline + text + newline
+	cmd="$(printf "a\\%b$insert" "\n")"
+
+	sed -e "/$anchor/$cmd" "$infile" >"$tmpfile"
+	# replace original file with the edited one
+	mv "$tmpfile" "$infile"
+}
+
+txt_subst() {
+	local before="$1"
+	local after="$2"
+	local infile="$3"
+	local tmpfile="$infile.swp"
+
+	sed -e "s/$before/$after/" "$infile" >"$tmpfile"
+	# replace original file with the edited one
+	mv "$tmpfile" "$infile"
+}
+
+txt_delete() {
+	local text="$1"
+	local infile="$2"
+	local tmpfile="$infile.swp"
+
+	sed -e "/$text/d" "$infile" >"$tmpfile"
+	# replace original file with the edited one
+	mv "$tmpfile" "$infile"
+}
+
 set_var() {
 	local name=$1 new=$2 before=$3
 
 	name_re="^($name=|# $name is not set)"
 	before_re="^($before=|# $before is not set)"
 	if test -n "$before" && grep -Eq "$before_re" "$FN"; then
-		sed -ri "/$before_re/a $new" "$FN"
+		txt_append "^$before=" "$new" "$FN"
+		txt_append "^# $before is not set" "$new" "$FN"
 	elif grep -Eq "$name_re" "$FN"; then
-		sed -ri "s:$name_re.*:$new:" "$FN"
+		txt_subst "^$name=.*" "$new" "$FN"
+		txt_subst "^# $name is not set" "$new" "$FN"
 	else
 		echo "$new" >>"$FN"
 	fi
@@ -79,7 +116,8 @@
 undef_var() {
 	local name=$1
 
-	sed -ri "/^($name=|# $name is not set)/d" "$FN"
+	txt_delete "^$name=" "$FN"
+	txt_delete "^# $name is not set" "$FN"
 }
 
 if [ "$1" = "--file" ]; then
diff --git a/scripts/diffconfig b/scripts/diffconfig
index b91f3e3..6d67283 100755
--- a/scripts/diffconfig
+++ b/scripts/diffconfig
@@ -10,7 +10,7 @@
 import sys, os
 
 def usage():
-    print """Usage: diffconfig [-h] [-m] [<config1> <config2>]
+    print("""Usage: diffconfig [-h] [-m] [<config1> <config2>]
 
 Diffconfig is a simple utility for comparing two .config files.
 Using standard diff to compare .config files often includes extraneous and
@@ -33,7 +33,7 @@
  EXT2_FS  y -> n
  LOG_BUF_SHIFT  14 -> 16
  PRINTK_TIME  n -> y
-"""
+""")
     sys.exit(0)
 
 # returns a dictionary of name/value pairs for config items in the file
@@ -54,23 +54,23 @@
     if merge_style:
         if new_value:
             if new_value=="n":
-                print "# CONFIG_%s is not set" % config
+                print("# CONFIG_%s is not set" % config)
             else:
-                print "CONFIG_%s=%s" % (config, new_value)
+                print("CONFIG_%s=%s" % (config, new_value))
     else:
         if op=="-":
-            print "-%s %s" % (config, value)
+            print("-%s %s" % (config, value))
         elif op=="+":
-            print "+%s %s" % (config, new_value)
+            print("+%s %s" % (config, new_value))
         else:
-            print " %s %s -> %s" % (config, value, new_value)
+            print(" %s %s -> %s" % (config, value, new_value))
 
 def main():
     global merge_style
 
     # parse command line args
     if ("-h" in sys.argv or "--help" in sys.argv):
-	usage()
+        usage()
 
     merge_style = 0
     if "-m" in sys.argv:
@@ -79,23 +79,27 @@
 
     argc = len(sys.argv)
     if not (argc==1 or argc == 3):
-        print "Error: incorrect number of arguments or unrecognized option"
+        print("Error: incorrect number of arguments or unrecognized option")
         usage()
 
     if argc == 1:
         # if no filenames given, assume .config and .config.old
         build_dir=""
-        if os.environ.has_key("KBUILD_OUTPUT"):
+        if "KBUILD_OUTPUT" in os.environ:
             build_dir = os.environ["KBUILD_OUTPUT"]+"/"
-
         configa_filename = build_dir + ".config.old"
         configb_filename = build_dir + ".config"
     else:
         configa_filename = sys.argv[1]
         configb_filename = sys.argv[2]
 
-    a = readconfig(file(configa_filename))
-    b = readconfig(file(configb_filename))
+    try:
+        a = readconfig(open(configa_filename))
+        b = readconfig(open(configb_filename))
+    except (IOError):
+        e = sys.exc_info()[1]
+        print("I/O error[%s]: %s\n" % (e.args[0],e.args[1]))
+        usage()
 
     # print items in a but not b (accumulate, sort and print)
     old = []
@@ -121,8 +125,7 @@
 
     # now print items in b but not in a
     # (items from b that were in a were removed above)
-    new = b.keys()
-    new.sort()
+    new = sorted(b.keys())
     for config in new:
         print_config("+", config, None, b[config])
 
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index c55c227..87f7238 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -140,7 +140,9 @@
 			sym->flags |= def_flags;
 			break;
 		}
-		conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+		if (def != S_DEF_AUTO)
+			conf_warning("symbol value '%s' invalid for %s",
+				     p, sym->name);
 		return 1;
 	case S_OTHER:
 		if (*p != '"') {
@@ -161,7 +163,8 @@
 			memmove(p2, p2 + 1, strlen(p2));
 		}
 		if (!p2) {
-			conf_warning("invalid string found");
+			if (def != S_DEF_AUTO)
+				conf_warning("invalid string found");
 			return 1;
 		}
 		/* fall through */
@@ -172,7 +175,9 @@
 			sym->def[def].val = strdup(p);
 			sym->flags |= def_flags;
 		} else {
-			conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+			if (def != S_DEF_AUTO)
+				conf_warning("symbol value '%s' invalid for %s",
+					     p, sym->name);
 			return 1;
 		}
 		break;
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 6c9c45f..2c39631 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -401,8 +401,8 @@
 	struct subtitle_part stpart;
 
 	title = str_new();
-	str_printf( &title, _("Enter %s (sub)string or regexp to search for "
-			      "(with or without \"%s\")"), CONFIG_, CONFIG_);
+	str_printf( &title, _("Enter (sub)string or regexp to search for "
+			      "(with or without \"%s\")"), CONFIG_);
 
 again:
 	dialog_clear();
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 7e233a6..c1d5320 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -197,12 +197,15 @@
 
 void menu_add_option(int token, char *arg)
 {
-	struct property *prop;
-
 	switch (token) {
 	case T_OPT_MODULES:
-		prop = prop_alloc(P_DEFAULT, modules_sym);
-		prop->expr = expr_alloc_symbol(current_entry->sym);
+		if (modules_sym)
+			zconf_error("symbol '%s' redefines option 'modules'"
+				    " already defined by symbol '%s'",
+				    current_entry->sym->name,
+				    modules_sym->name
+				    );
+		modules_sym = current_entry->sym;
 		break;
 	case T_OPT_DEFCONFIG_LIST:
 		if (!sym_defconfig_list)
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 7975d8d..4fbecd2 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -695,8 +695,8 @@
 	int dres;
 
 	title = str_new();
-	str_printf( &title, _("Enter %s (sub)string or regexp to search for "
-			      "(with or without \"%s\")"), CONFIG_, CONFIG_);
+	str_printf( &title, _("Enter (sub)string or regexp to search for "
+			      "(with or without \"%s\")"), CONFIG_);
 
 again:
 	dres = dialog_inputbox(main_window,
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index d550300..c9a6775 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -136,7 +136,7 @@
 	return NULL;
 }
 
-static long sym_get_range_val(struct symbol *sym, int base)
+static long long sym_get_range_val(struct symbol *sym, int base)
 {
 	sym_calc_value(sym);
 	switch (sym->type) {
@@ -149,13 +149,14 @@
 	default:
 		break;
 	}
-	return strtol(sym->curr.val, NULL, base);
+	return strtoll(sym->curr.val, NULL, base);
 }
 
 static void sym_validate_range(struct symbol *sym)
 {
 	struct property *prop;
-	long base, val, val2;
+	int base;
+	long long val, val2;
 	char str[64];
 
 	switch (sym->type) {
@@ -171,7 +172,7 @@
 	prop = sym_get_range_prop(sym);
 	if (!prop)
 		return;
-	val = strtol(sym->curr.val, NULL, base);
+	val = strtoll(sym->curr.val, NULL, base);
 	val2 = sym_get_range_val(prop->expr->left.sym, base);
 	if (val >= val2) {
 		val2 = sym_get_range_val(prop->expr->right.sym, base);
@@ -179,9 +180,9 @@
 			return;
 	}
 	if (sym->type == S_INT)
-		sprintf(str, "%ld", val2);
+		sprintf(str, "%lld", val2);
 	else
-		sprintf(str, "0x%lx", val2);
+		sprintf(str, "0x%llx", val2);
 	sym->curr.val = strdup(str);
 }
 
@@ -594,7 +595,7 @@
 bool sym_string_within_range(struct symbol *sym, const char *str)
 {
 	struct property *prop;
-	long val;
+	long long val;
 
 	switch (sym->type) {
 	case S_STRING:
@@ -605,7 +606,7 @@
 		prop = sym_get_range_prop(sym);
 		if (!prop)
 			return true;
-		val = strtol(str, NULL, 10);
+		val = strtoll(str, NULL, 10);
 		return val >= sym_get_range_val(prop->expr->left.sym, 10) &&
 		       val <= sym_get_range_val(prop->expr->right.sym, 10);
 	case S_HEX:
@@ -614,7 +615,7 @@
 		prop = sym_get_range_prop(sym);
 		if (!prop)
 			return true;
-		val = strtol(str, NULL, 16);
+		val = strtoll(str, NULL, 16);
 		return val >= sym_get_range_val(prop->expr->left.sym, 16) &&
 		       val <= sym_get_range_val(prop->expr->right.sym, 16);
 	case S_BOOLEAN:
@@ -963,11 +964,11 @@
  * - first, symbols that match exactly
  * - then, alphabetical sort
  */
-static int sym_rel_comp( const void *sym1, const void *sym2 )
+static int sym_rel_comp(const void *sym1, const void *sym2)
 {
-	struct sym_match *s1 = *(struct sym_match **)sym1;
-	struct sym_match *s2 = *(struct sym_match **)sym2;
-	int l1, l2;
+	const struct sym_match *s1 = sym1;
+	const struct sym_match *s2 = sym2;
+	int exact1, exact2;
 
 	/* Exact match:
 	 * - if matched length on symbol s1 is the length of that symbol,
@@ -978,11 +979,11 @@
 	 * exactly; if this is the case, we can't decide which comes first,
 	 * and we fallback to sorting alphabetically.
 	 */
-	l1 = s1->eo - s1->so;
-	l2 = s2->eo - s2->so;
-	if (l1 == strlen(s1->sym->name) && l2 != strlen(s2->sym->name))
+	exact1 = (s1->eo - s1->so) == strlen(s1->sym->name);
+	exact2 = (s2->eo - s2->so) == strlen(s2->sym->name);
+	if (exact1 && !exact2)
 		return -1;
-	if (l1 != strlen(s1->sym->name) && l2 == strlen(s2->sym->name))
+	if (!exact1 && exact2)
 		return 1;
 
 	/* As a fallback, sort symbols alphabetically */
@@ -992,7 +993,7 @@
 struct symbol **sym_re_search(const char *pattern)
 {
 	struct symbol *sym, **sym_arr = NULL;
-	struct sym_match **sym_match_arr = NULL;
+	struct sym_match *sym_match_arr = NULL;
 	int i, cnt, size;
 	regex_t re;
 	regmatch_t match[1];
@@ -1005,47 +1006,38 @@
 		return NULL;
 
 	for_all_symbols(i, sym) {
-		struct sym_match *tmp_sym_match;
 		if (sym->flags & SYMBOL_CONST || !sym->name)
 			continue;
 		if (regexec(&re, sym->name, 1, match, 0))
 			continue;
-		if (cnt + 1 >= size) {
+		if (cnt >= size) {
 			void *tmp;
 			size += 16;
-			tmp = realloc(sym_match_arr, size * sizeof(struct sym_match *));
-			if (!tmp) {
+			tmp = realloc(sym_match_arr, size * sizeof(struct sym_match));
+			if (!tmp)
 				goto sym_re_search_free;
-			}
 			sym_match_arr = tmp;
 		}
 		sym_calc_value(sym);
-		tmp_sym_match = (struct sym_match*)malloc(sizeof(struct sym_match));
-		if (!tmp_sym_match)
-			goto sym_re_search_free;
-		tmp_sym_match->sym = sym;
-		/* As regexec return 0, we know we have a match, so
+		/* As regexec returned 0, we know we have a match, so
 		 * we can use match[0].rm_[se]o without further checks
 		 */
-		tmp_sym_match->so = match[0].rm_so;
-		tmp_sym_match->eo = match[0].rm_eo;
-		sym_match_arr[cnt++] = tmp_sym_match;
+		sym_match_arr[cnt].so = match[0].rm_so;
+		sym_match_arr[cnt].eo = match[0].rm_eo;
+		sym_match_arr[cnt++].sym = sym;
 	}
 	if (sym_match_arr) {
-		qsort(sym_match_arr, cnt, sizeof(struct sym_match*), sym_rel_comp);
+		qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
 		sym_arr = malloc((cnt+1) * sizeof(struct symbol));
 		if (!sym_arr)
 			goto sym_re_search_free;
 		for (i = 0; i < cnt; i++)
-			sym_arr[i] = sym_match_arr[i]->sym;
+			sym_arr[i] = sym_match_arr[i].sym;
 		sym_arr[cnt] = NULL;
 	}
 sym_re_search_free:
-	if (sym_match_arr) {
-		for (i = 0; i < cnt; i++)
-			free(sym_match_arr[i]);
-		free(sym_match_arr);
-	}
+	/* sym_match_arr can be NULL if no match, but free(NULL) is OK */
+	free(sym_match_arr);
 	regfree(&re);
 
 	return sym_arr;
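
Switching sym_match_arr from an array of pointers to a flat array of structs also changes what qsort() hands the comparator: a pointer to the element itself, so the old double dereference disappears, and a single free() replaces one free per match. A self-contained sketch of sorting a flat struct array the same way (exact matches first, then alphabetical; names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct match { const char *name; int exact; };

	/* qsort passes pointers to elements; with a flat struct array
	 * that is simply `const struct match *`, no extra dereference. */
	static int cmp(const void *a, const void *b)
	{
		const struct match *m1 = a, *m2 = b;

		if (m1->exact != m2->exact)
			return m2->exact - m1->exact;	/* exact matches first */
		return strcmp(m1->name, m2->name);	/* then alphabetical */
	}

	int main(void)
	{
		struct match arr[] = {
			{ "FOO_BAR", 0 }, { "FOO", 1 }, { "BAR", 0 },
		};
		int i;

		qsort(arr, 3, sizeof(arr[0]), cmp);
		for (i = 0; i < 3; i++)
			printf("%s\n", arr[i].name);	/* FOO, BAR, FOO_BAR */
		return 0;
	}
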
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index f636141..25ae16a 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1,9 +1,8 @@
-/* A Bison parser, made by GNU Bison 2.4.3.  */
+/* A Bison parser, made by GNU Bison 2.5.  */
 
-/* Skeleton implementation for Bison's Yacc-like parsers in C
+/* Bison implementation for Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
-   2009, 2010 Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -45,7 +44,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.4.3"
+#define YYBISON_VERSION "2.5"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -302,11 +301,11 @@
 #    define alloca _alloca
 #   else
 #    define YYSTACK_ALLOC alloca
-#    if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 #     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-#     ifndef _STDLIB_H
-#      define _STDLIB_H 1
+#     ifndef EXIT_SUCCESS
+#      define EXIT_SUCCESS 0
 #     endif
 #    endif
 #   endif
@@ -329,24 +328,24 @@
 #  ifndef YYSTACK_ALLOC_MAXIMUM
 #   define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
 #  endif
-#  if (defined __cplusplus && ! defined _STDLIB_H \
+#  if (defined __cplusplus && ! defined EXIT_SUCCESS \
        && ! ((defined YYMALLOC || defined malloc) \
 	     && (defined YYFREE || defined free)))
 #   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-#   ifndef _STDLIB_H
-#    define _STDLIB_H 1
+#   ifndef EXIT_SUCCESS
+#    define EXIT_SUCCESS 0
 #   endif
 #  endif
 #  ifndef YYMALLOC
 #   define YYMALLOC malloc
-#   if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#   if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
 #   endif
 #  endif
 #  ifndef YYFREE
 #   define YYFREE free
-#   if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#   if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 void free (void *); /* INFRINGES ON USER NAME SPACE */
 #   endif
@@ -375,23 +374,7 @@
      ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
       + YYSTACK_GAP_MAXIMUM)
 
-/* Copy COUNT objects from FROM to TO.  The source and destination do
-   not overlap.  */
-# ifndef YYCOPY
-#  if defined __GNUC__ && 1 < __GNUC__
-#   define YYCOPY(To, From, Count) \
-      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
-#  else
-#   define YYCOPY(To, From, Count)		\
-      do					\
-	{					\
-	  YYSIZE_T yyi;				\
-	  for (yyi = 0; yyi < (Count); yyi++)	\
-	    (To)[yyi] = (From)[yyi];		\
-	}					\
-      while (YYID (0))
-#  endif
-# endif
+# define YYCOPY_NEEDED 1
 
 /* Relocate STACK from its old location to the new one.  The
    local variables YYSIZE and YYSTACKSIZE give the old and new number of
@@ -411,6 +394,26 @@
 
 #endif
 
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from FROM to TO.  The source and destination do
+   not overlap.  */
+# ifndef YYCOPY
+#  if defined __GNUC__ && 1 < __GNUC__
+#   define YYCOPY(To, From, Count) \
+      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+#  else
+#   define YYCOPY(To, From, Count)		\
+      do					\
+	{					\
+	  YYSIZE_T yyi;				\
+	  for (yyi = 0; yyi < (Count); yyi++)	\
+	    (To)[yyi] = (From)[yyi];		\
+	}					\
+      while (YYID (0))
+#  endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  11
 /* YYLAST -- Last index in YYTABLE.  */
@@ -529,18 +532,18 @@
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
 static const yytype_uint16 yyrline[] =
 {
-       0,   104,   104,   104,   106,   106,   108,   110,   111,   112,
-     113,   114,   115,   119,   123,   123,   123,   123,   123,   123,
-     123,   123,   127,   128,   129,   130,   131,   132,   136,   137,
-     143,   151,   157,   165,   175,   177,   178,   179,   180,   181,
-     182,   185,   193,   199,   209,   215,   221,   224,   226,   237,
-     238,   243,   252,   257,   265,   268,   270,   271,   272,   273,
-     274,   277,   283,   294,   300,   310,   312,   317,   325,   333,
-     336,   338,   339,   340,   345,   352,   359,   364,   372,   375,
-     377,   378,   379,   382,   390,   397,   404,   410,   417,   419,
-     420,   421,   424,   432,   434,   435,   438,   445,   447,   452,
-     453,   456,   457,   458,   462,   463,   466,   467,   470,   471,
-     472,   473,   474,   475,   476,   479,   480,   483,   484
+       0,   103,   103,   103,   105,   105,   107,   109,   110,   111,
+     112,   113,   114,   118,   122,   122,   122,   122,   122,   122,
+     122,   122,   126,   127,   128,   129,   130,   131,   135,   136,
+     142,   150,   156,   164,   174,   176,   177,   178,   179,   180,
+     181,   184,   192,   198,   208,   214,   220,   223,   225,   236,
+     237,   242,   251,   256,   264,   267,   269,   270,   271,   272,
+     273,   276,   282,   293,   299,   309,   311,   316,   324,   332,
+     335,   337,   338,   339,   344,   351,   358,   363,   371,   374,
+     376,   377,   378,   381,   389,   396,   403,   409,   416,   418,
+     419,   420,   423,   431,   433,   434,   437,   444,   446,   451,
+     452,   455,   456,   457,   461,   462,   465,   466,   469,   470,
+     471,   472,   473,   474,   475,   478,   479,   482,   483
 };
 #endif
 
@@ -615,8 +618,8 @@
        3,     3,     2,     3,     3,     1,     1,     0,     1
 };
 
-/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
-   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
+/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
+   Performed when YYTABLE doesn't specify something else to do.  Zero
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
@@ -691,8 +694,7 @@
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
    positive, shift that token.  If negative, reduce the rule which
-   number is the opposite.  If zero, do what YYDEFACT says.
-   If YYTABLE_NINF, syntax error.  */
+   number is the opposite.  If YYTABLE_NINF, syntax error.  */
 #define YYTABLE_NINF -86
 static const yytype_int16 yytable[] =
 {
@@ -728,6 +730,12 @@
      184
 };
 
+#define yypact_value_is_default(yystate) \
+  ((yystate) == (-90))
+
+#define yytable_value_is_error(yytable_value) \
+  YYID (0)
+
 static const yytype_int16 yycheck[] =
 {
        1,    67,    68,    10,    93,    94,    76,     3,    76,    14,
@@ -821,7 +829,6 @@
     {								\
       yychar = (Token);						\
       yylval = (Value);						\
-      yytoken = YYTRANSLATE (yychar);				\
       YYPOPSTACK (1);						\
       goto yybackup;						\
     }								\
@@ -863,19 +870,10 @@
 #endif
 
 
-/* YY_LOCATION_PRINT -- Print the location on the stream.
-   This macro was not mandated originally: define only if we know
-   we won't break user code: when these are the locations we know.  */
+/* This macro is provided for backward compatibility. */
 
 #ifndef YY_LOCATION_PRINT
-# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
-#  define YY_LOCATION_PRINT(File, Loc)			\
-     fprintf (File, "%d.%d-%d.%d",			\
-	      (Loc).first_line, (Loc).first_column,	\
-	      (Loc).last_line,  (Loc).last_column)
-# else
-#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-# endif
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
 #endif
 
 
@@ -1067,7 +1065,6 @@
 # define YYMAXDEPTH 10000
 #endif
 
-
 
 #if YYERROR_VERBOSE
 
@@ -1170,115 +1167,142 @@
 }
 # endif
 
-/* Copy into YYRESULT an error message about the unexpected token
-   YYCHAR while in state YYSTATE.  Return the number of bytes copied,
-   including the terminating null byte.  If YYRESULT is null, do not
-   copy anything; just return the number of bytes that would be
-   copied.  As a special case, return 0 if an ordinary "syntax error"
-   message will do.  Return YYSIZE_MAXIMUM if overflow occurs during
-   size calculation.  */
-static YYSIZE_T
-yysyntax_error (char *yyresult, int yystate, int yychar)
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+   about the unexpected token YYTOKEN for the state stack whose top is
+   YYSSP.
+
+   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is
+   not large enough to hold the message.  In that case, also set
+   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the
+   required number of bytes is too large to store.  */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+                yytype_int16 *yyssp, int yytoken)
 {
-  int yyn = yypact[yystate];
+  YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
+  YYSIZE_T yysize = yysize0;
+  YYSIZE_T yysize1;
+  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+  /* Internationalized format string. */
+  const char *yyformat = 0;
+  /* Arguments of yyformat. */
+  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+  /* Number of reported tokens (one for the "unexpected", one per
+     "expected"). */
+  int yycount = 0;
 
-  if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
-    return 0;
-  else
+  /* There are many possibilities here to consider:
+     - Assume YYFAIL is not used.  It's too flawed to consider.  See
+       <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
+       for details.  YYERROR is fine as it does not invoke this
+       function.
+     - If this state is a consistent state with a default action, then
+       the only way this function was invoked is if the default action
+       is an error action.  In that case, don't check for expected
+       tokens because there are none.
+     - The only way there can be no lookahead present (in yychar) is if
+       this state is a consistent state with a default action.  Thus,
+       detecting the absence of a lookahead is sufficient to determine
+       that there is no unexpected or expected token to report.  In that
+       case, just report a simple "syntax error".
+     - Don't assume there isn't a lookahead just because this state is a
+       consistent state with a default action.  There might have been a
+       previous inconsistent state, consistent state with a non-default
+       action, or user semantic action that manipulated yychar.
+     - Of course, the expected token list depends on states to have
+       correct lookahead information, and it depends on the parser not
+       to perform extra reductions after fetching a lookahead from the
+       scanner and before detecting a syntax error.  Thus, state merging
+       (from LALR or IELR) and default reductions corrupt the expected
+       token list.  However, the list is correct for canonical LR with
+       one exception: it will still contain any token that will not be
+       accepted due to an error action in a later state.
+  */
+  if (yytoken != YYEMPTY)
     {
-      int yytype = YYTRANSLATE (yychar);
-      YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
-      YYSIZE_T yysize = yysize0;
-      YYSIZE_T yysize1;
-      int yysize_overflow = 0;
-      enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
-      char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
-      int yyx;
+      int yyn = yypact[*yyssp];
+      yyarg[yycount++] = yytname[yytoken];
+      if (!yypact_value_is_default (yyn))
+        {
+          /* Start YYX at -YYN if negative to avoid negative indexes in
+             YYCHECK.  In other words, skip the first -YYN actions for
+             this state because they are default actions.  */
+          int yyxbegin = yyn < 0 ? -yyn : 0;
+          /* Stay within bounds of both yycheck and yytname.  */
+          int yychecklim = YYLAST - yyn + 1;
+          int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+          int yyx;
 
-# if 0
-      /* This is so xgettext sees the translatable formats that are
-	 constructed on the fly.  */
-      YY_("syntax error, unexpected %s");
-      YY_("syntax error, unexpected %s, expecting %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s or %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
-# endif
-      char *yyfmt;
-      char const *yyf;
-      static char const yyunexpected[] = "syntax error, unexpected %s";
-      static char const yyexpecting[] = ", expecting %s";
-      static char const yyor[] = " or %s";
-      char yyformat[sizeof yyunexpected
-		    + sizeof yyexpecting - 1
-		    + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
-		       * (sizeof yyor - 1))];
-      char const *yyprefix = yyexpecting;
-
-      /* Start YYX at -YYN if negative to avoid negative indexes in
-	 YYCHECK.  */
-      int yyxbegin = yyn < 0 ? -yyn : 0;
-
-      /* Stay within bounds of both yycheck and yytname.  */
-      int yychecklim = YYLAST - yyn + 1;
-      int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
-      int yycount = 1;
-
-      yyarg[0] = yytname[yytype];
-      yyfmt = yystpcpy (yyformat, yyunexpected);
-
-      for (yyx = yyxbegin; yyx < yyxend; ++yyx)
-	if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
-	  {
-	    if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
-	      {
-		yycount = 1;
-		yysize = yysize0;
-		yyformat[sizeof yyunexpected - 1] = '\0';
-		break;
-	      }
-	    yyarg[yycount++] = yytname[yyx];
-	    yysize1 = yysize + yytnamerr (0, yytname[yyx]);
-	    yysize_overflow |= (yysize1 < yysize);
-	    yysize = yysize1;
-	    yyfmt = yystpcpy (yyfmt, yyprefix);
-	    yyprefix = yyor;
-	  }
-
-      yyf = YY_(yyformat);
-      yysize1 = yysize + yystrlen (yyf);
-      yysize_overflow |= (yysize1 < yysize);
-      yysize = yysize1;
-
-      if (yysize_overflow)
-	return YYSIZE_MAXIMUM;
-
-      if (yyresult)
-	{
-	  /* Avoid sprintf, as that infringes on the user's name space.
-	     Don't have undefined behavior even if the translation
-	     produced a string with the wrong number of "%s"s.  */
-	  char *yyp = yyresult;
-	  int yyi = 0;
-	  while ((*yyp = *yyf) != '\0')
-	    {
-	      if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
-		{
-		  yyp += yytnamerr (yyp, yyarg[yyi++]);
-		  yyf += 2;
-		}
-	      else
-		{
-		  yyp++;
-		  yyf++;
-		}
-	    }
-	}
-      return yysize;
+          for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+            if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+                && !yytable_value_is_error (yytable[yyx + yyn]))
+              {
+                if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+                  {
+                    yycount = 1;
+                    yysize = yysize0;
+                    break;
+                  }
+                yyarg[yycount++] = yytname[yyx];
+                yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+                if (! (yysize <= yysize1
+                       && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+                  return 2;
+                yysize = yysize1;
+              }
+        }
     }
+
+  switch (yycount)
+    {
+# define YYCASE_(N, S)                      \
+      case N:                               \
+        yyformat = S;                       \
+      break
+      YYCASE_(0, YY_("syntax error"));
+      YYCASE_(1, YY_("syntax error, unexpected %s"));
+      YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+      YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+      YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+      YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+    }
+
+  yysize1 = yysize + yystrlen (yyformat);
+  if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+    return 2;
+  yysize = yysize1;
+
+  if (*yymsg_alloc < yysize)
+    {
+      *yymsg_alloc = 2 * yysize;
+      if (! (yysize <= *yymsg_alloc
+             && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+        *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+      return 1;
+    }
+
+  /* Avoid sprintf, as that infringes on the user's name space.
+     Don't have undefined behavior even if the translation
+     produced a string with the wrong number of "%s"s.  */
+  {
+    char *yyp = *yymsg;
+    int yyi = 0;
+    while ((*yyp = *yyformat) != '\0')
+      if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+        {
+          yyp += yytnamerr (yyp, yyarg[yyi++]);
+          yyformat += 2;
+        }
+      else
+        {
+          yyp++;
+          yyformat++;
+        }
+  }
+  return 0;
 }
 #endif /* YYERROR_VERBOSE */
-
 
 /*-----------------------------------------------.
 | Release the memory associated to this symbol.  |
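
The rewritten yysyntax_error() above replaces the old measure-then-format
double call with a three-way status: 0 means the message was written into
*yymsg, 1 means the buffer is too small (with *yymsg_alloc raised to a
sufficient size for a retry), and 2 means the required size is too large to
store. A self-contained sketch of the caller-side retry this contract implies;
demo_syntax_error() is a hypothetical stand-in for the real function, with a
fixed message in place of the expected-token formatting:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in obeying the same 0/1/2 contract.  */
    static int demo_syntax_error(size_t *msg_alloc, char **msg)
    {
        const char *text = "syntax error, unexpected FOO";
        size_t need = strlen(text) + 1;

        if (*msg_alloc < need) {
            *msg_alloc = 2 * need;   /* report a sufficient size */
            return 1;                /* too small: grow and retry */
        }
        strcpy(*msg, text);
        return 0;                    /* message written */
    }

    int main(void)
    {
        char stackbuf[8];
        size_t alloc = sizeof(stackbuf);
        char *msg = stackbuf;
        int status = demo_syntax_error(&alloc, &msg);

        if (status == 1) {           /* grow once, as yyerrlab does below */
            msg = malloc(alloc);
            if (msg)
                status = demo_syntax_error(&alloc, &msg);
            else
                msg = stackbuf;
        }
        if (status == 0)
            puts(msg);
        if (msg != stackbuf)
            free(msg);
        return 0;
    }
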
@@ -1341,6 +1365,7 @@
     }
 }
 
+
 /* Prevent warnings from -Wmissing-prototypes.  */
 #ifdef YYPARSE_PARAM
 #if defined __STDC__ || defined __cplusplus
@@ -1367,10 +1392,9 @@
 int yynerrs;
 
 
-
-/*-------------------------.
-| yyparse or yypush_parse.  |
-`-------------------------*/
+/*----------.
+| yyparse.  |
+`----------*/
 
 #ifdef YYPARSE_PARAM
 #if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1394,8 +1418,6 @@
 #endif
 #endif
 {
-
-
     int yystate;
     /* Number of tokens to shift before error messages enabled.  */
     int yyerrstatus;
@@ -1550,7 +1572,7 @@
 
   /* First try to decide what to do without reference to lookahead token.  */
   yyn = yypact[yystate];
-  if (yyn == YYPACT_NINF)
+  if (yypact_value_is_default (yyn))
     goto yydefault;
 
   /* Not known => get a lookahead token if don't already have one.  */
@@ -1581,8 +1603,8 @@
   yyn = yytable[yyn];
   if (yyn <= 0)
     {
-      if (yyn == 0 || yyn == YYTABLE_NINF)
-	goto yyerrlab;
+      if (yytable_value_is_error (yyn))
+        goto yyerrlab;
       yyn = -yyn;
       goto yyreduce;
     }
@@ -1637,34 +1659,34 @@
     {
         case 10:
 
-    { zconf_error("unexpected end statement"); ;}
+    { zconf_error("unexpected end statement"); }
     break;
 
   case 11:
 
-    { zconf_error("unknown statement \"%s\"", (yyvsp[(2) - (4)].string)); ;}
+    { zconf_error("unknown statement \"%s\"", (yyvsp[(2) - (4)].string)); }
     break;
 
   case 12:
 
     {
 	zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[(2) - (4)].id)->name);
-;}
+}
     break;
 
   case 13:
 
-    { zconf_error("invalid statement"); ;}
+    { zconf_error("invalid statement"); }
     break;
 
   case 28:
 
-    { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); ;}
+    { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); }
     break;
 
   case 29:
 
-    { zconf_error("invalid option"); ;}
+    { zconf_error("invalid option"); }
     break;
 
   case 30:
@@ -1674,7 +1696,7 @@
 	sym->flags |= SYMBOL_OPTIONAL;
 	menu_add_entry(sym);
 	printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
-;}
+}
     break;
 
   case 31:
@@ -1682,7 +1704,7 @@
     {
 	menu_end_entry();
 	printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 32:
@@ -1692,7 +1714,7 @@
 	sym->flags |= SYMBOL_OPTIONAL;
 	menu_add_entry(sym);
 	printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
-;}
+}
     break;
 
   case 33:
@@ -1704,7 +1726,7 @@
 		zconfprint("warning: menuconfig statement without prompt");
 	menu_end_entry();
 	printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 41:
@@ -1714,7 +1736,7 @@
 	printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
 		zconf_curname(), zconf_lineno(),
 		(yyvsp[(1) - (3)].id)->stype);
-;}
+}
     break;
 
   case 42:
@@ -1722,7 +1744,7 @@
     {
 	menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
 	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 43:
@@ -1734,7 +1756,7 @@
 	printd(DEBUG_PARSE, "%s:%d:default(%u)\n",
 		zconf_curname(), zconf_lineno(),
 		(yyvsp[(1) - (4)].id)->stype);
-;}
+}
     break;
 
   case 44:
@@ -1742,7 +1764,7 @@
     {
 	menu_add_symbol(P_SELECT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
 	printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 45:
@@ -1750,7 +1772,7 @@
     {
 	menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[(2) - (5)].symbol), (yyvsp[(3) - (5)].symbol)), (yyvsp[(4) - (5)].expr));
 	printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 48:
@@ -1762,17 +1784,17 @@
 	else
 		zconfprint("warning: ignoring unknown option %s", (yyvsp[(2) - (3)].string));
 	free((yyvsp[(2) - (3)].string));
-;}
+}
     break;
 
   case 49:
 
-    { (yyval.string) = NULL; ;}
+    { (yyval.string) = NULL; }
     break;
 
   case 50:
 
-    { (yyval.string) = (yyvsp[(2) - (2)].string); ;}
+    { (yyval.string) = (yyvsp[(2) - (2)].string); }
     break;
 
   case 51:
@@ -1783,14 +1805,14 @@
 	menu_add_entry(sym);
 	menu_add_expr(P_CHOICE, NULL, NULL);
 	printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 52:
 
     {
 	(yyval.menu) = menu_add_menu();
-;}
+}
     break;
 
   case 53:
@@ -1800,7 +1822,7 @@
 		menu_end_menu();
 		printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
 	}
-;}
+}
     break;
 
   case 61:
@@ -1808,7 +1830,7 @@
     {
 	menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
 	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 62:
@@ -1821,7 +1843,7 @@
 			(yyvsp[(1) - (3)].id)->stype);
 	} else
 		YYERROR;
-;}
+}
     break;
 
   case 63:
@@ -1829,7 +1851,7 @@
     {
 	current_entry->sym->flags |= SYMBOL_OPTIONAL;
 	printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 64:
@@ -1841,7 +1863,7 @@
 			zconf_curname(), zconf_lineno());
 	} else
 		YYERROR;
-;}
+}
     break;
 
   case 67:
@@ -1851,7 +1873,7 @@
 	menu_add_entry(NULL);
 	menu_add_dep((yyvsp[(2) - (3)].expr));
 	(yyval.menu) = menu_add_menu();
-;}
+}
     break;
 
   case 68:
@@ -1861,14 +1883,14 @@
 		menu_end_menu();
 		printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
 	}
-;}
+}
     break;
 
   case 74:
 
     {
 	menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
-;}
+}
     break;
 
   case 75:
@@ -1877,14 +1899,14 @@
 	menu_add_entry(NULL);
 	menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
 	printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 76:
 
     {
 	(yyval.menu) = menu_add_menu();
-;}
+}
     break;
 
   case 77:
@@ -1894,7 +1916,7 @@
 		menu_end_menu();
 		printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
 	}
-;}
+}
     break;
 
   case 83:
@@ -1902,7 +1924,7 @@
     {
 	printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
 	zconf_nextfile((yyvsp[(2) - (3)].string));
-;}
+}
     break;
 
   case 84:
@@ -1911,14 +1933,14 @@
 	menu_add_entry(NULL);
 	menu_add_prompt(P_COMMENT, (yyvsp[(2) - (3)].string), NULL);
 	printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 85:
 
     {
 	menu_end_entry();
-;}
+}
     break;
 
   case 86:
@@ -1926,14 +1948,14 @@
     {
 	printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
 	zconf_starthelp();
-;}
+}
     break;
 
   case 87:
 
     {
 	current_entry->help = (yyvsp[(2) - (2)].string);
-;}
+}
     break;
 
   case 92:
@@ -1941,102 +1963,113 @@
     {
 	menu_add_dep((yyvsp[(3) - (4)].expr));
 	printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
-;}
+}
     break;
 
   case 96:
 
     {
 	menu_add_visibility((yyvsp[(2) - (2)].expr));
-;}
+}
     break;
 
   case 98:
 
     {
 	menu_add_prompt(P_PROMPT, (yyvsp[(1) - (2)].string), (yyvsp[(2) - (2)].expr));
-;}
+}
     break;
 
   case 101:
 
-    { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
+    { (yyval.id) = (yyvsp[(1) - (2)].id); }
     break;
 
   case 102:
 
-    { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
+    { (yyval.id) = (yyvsp[(1) - (2)].id); }
     break;
 
   case 103:
 
-    { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
+    { (yyval.id) = (yyvsp[(1) - (2)].id); }
     break;
 
   case 106:
 
-    { (yyval.expr) = NULL; ;}
+    { (yyval.expr) = NULL; }
     break;
 
   case 107:
 
-    { (yyval.expr) = (yyvsp[(2) - (2)].expr); ;}
+    { (yyval.expr) = (yyvsp[(2) - (2)].expr); }
     break;
 
   case 108:
 
-    { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); ;}
+    { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); }
     break;
 
   case 109:
 
-    { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;}
+    { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 110:
 
-    { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;}
+    { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 111:
 
-    { (yyval.expr) = (yyvsp[(2) - (3)].expr); ;}
+    { (yyval.expr) = (yyvsp[(2) - (3)].expr); }
     break;
 
   case 112:
 
-    { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); ;}
+    { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); }
     break;
 
   case 113:
 
-    { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;}
+    { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
     break;
 
   case 114:
 
-    { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;}
+    { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
     break;
 
   case 115:
 
-    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); ;}
+    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); }
     break;
 
   case 116:
 
-    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); ;}
+    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); }
     break;
 
   case 117:
 
-    { (yyval.string) = NULL; ;}
+    { (yyval.string) = NULL; }
     break;
 
 
 
       default: break;
     }
+  /* User semantic actions sometimes alter yychar, and that requires
+     that yytoken be updated with the new translation.  We take the
+     approach of translating immediately before every use of yytoken.
+     One alternative is translating here after every semantic action,
+     but that translation would be missed if the semantic action invokes
+     YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+     if it invokes YYBACKUP.  In the case of YYABORT or YYACCEPT, an
+     incorrect destructor might then be invoked immediately.  In the
+     case of YYERROR or YYBACKUP, subsequent parser actions might lead
+     to an incorrect destructor call or verbose syntax error message
+     before the lookahead is translated.  */
   YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
 
   YYPOPSTACK (yylen);
@@ -2064,6 +2097,10 @@
 | yyerrlab -- here on detecting error |
 `------------------------------------*/
 yyerrlab:
+  /* Make sure we have latest lookahead translation.  See comments at
+     user semantic actions for why this is necessary.  */
+  yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
   /* If not already recovering from an error, report this error.  */
   if (!yyerrstatus)
     {
@@ -2071,37 +2108,36 @@
 #if ! YYERROR_VERBOSE
       yyerror (YY_("syntax error"));
 #else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+                                        yyssp, yytoken)
       {
-	YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
-	if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
-	  {
-	    YYSIZE_T yyalloc = 2 * yysize;
-	    if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
-	      yyalloc = YYSTACK_ALLOC_MAXIMUM;
-	    if (yymsg != yymsgbuf)
-	      YYSTACK_FREE (yymsg);
-	    yymsg = (char *) YYSTACK_ALLOC (yyalloc);
-	    if (yymsg)
-	      yymsg_alloc = yyalloc;
-	    else
-	      {
-		yymsg = yymsgbuf;
-		yymsg_alloc = sizeof yymsgbuf;
-	      }
-	  }
-
-	if (0 < yysize && yysize <= yymsg_alloc)
-	  {
-	    (void) yysyntax_error (yymsg, yystate, yychar);
-	    yyerror (yymsg);
-	  }
-	else
-	  {
-	    yyerror (YY_("syntax error"));
-	    if (yysize != 0)
-	      goto yyexhaustedlab;
-	  }
+        char const *yymsgp = YY_("syntax error");
+        int yysyntax_error_status;
+        yysyntax_error_status = YYSYNTAX_ERROR;
+        if (yysyntax_error_status == 0)
+          yymsgp = yymsg;
+        else if (yysyntax_error_status == 1)
+          {
+            if (yymsg != yymsgbuf)
+              YYSTACK_FREE (yymsg);
+            yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+            if (!yymsg)
+              {
+                yymsg = yymsgbuf;
+                yymsg_alloc = sizeof yymsgbuf;
+                yysyntax_error_status = 2;
+              }
+            else
+              {
+                yysyntax_error_status = YYSYNTAX_ERROR;
+                yymsgp = yymsg;
+              }
+          }
+        yyerror (yymsgp);
+        if (yysyntax_error_status == 2)
+          goto yyexhaustedlab;
       }
+# undef YYSYNTAX_ERROR
 #endif
     }
 
@@ -2160,7 +2196,7 @@
   for (;;)
     {
       yyn = yypact[yystate];
-      if (yyn != YYPACT_NINF)
+      if (!yypact_value_is_default (yyn))
 	{
 	  yyn += YYTERROR;
 	  if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
@@ -2219,8 +2255,13 @@
 
 yyreturn:
   if (yychar != YYEMPTY)
-     yydestruct ("Cleanup: discarding lookahead",
-		 yytoken, &yylval);
+    {
+      /* Make sure we have latest lookahead translation.  See comments at
+         user semantic actions for why this is necessary.  */
+      yytoken = YYTRANSLATE (yychar);
+      yydestruct ("Cleanup: discarding lookahead",
+                  yytoken, &yylval);
+    }
   /* Do not reclaim the symbols of the rule which action triggered
      this YYABORT or YYACCEPT.  */
   YYPOPSTACK (yylen);
@@ -2256,9 +2297,6 @@
 
 	sym_init();
 	_menu_init();
-	modules_sym = sym_lookup(NULL, 0);
-	modules_sym->type = S_BOOLEAN;
-	modules_sym->flags |= SYMBOL_AUTO;
 	rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
 
 	if (getenv("ZCONF_DEBUG"))
@@ -2266,12 +2304,8 @@
 	zconfparse();
 	if (zconfnerrs)
 		exit(1);
-	if (!modules_sym->prop) {
-		struct property *prop;
-
-		prop = prop_alloc(P_DEFAULT, modules_sym);
-		prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
-	}
+	if (!modules_sym)
+		modules_sym = sym_find("n");
 
 	rootmenu.prompt->text = _(rootmenu.prompt->text);
 	rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 864da07..0653886 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -493,9 +493,6 @@
 
 	sym_init();
 	_menu_init();
-	modules_sym = sym_lookup(NULL, 0);
-	modules_sym->type = S_BOOLEAN;
-	modules_sym->flags |= SYMBOL_AUTO;
 	rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
 
 	if (getenv("ZCONF_DEBUG"))
@@ -503,12 +500,8 @@
 	zconfparse();
 	if (zconfnerrs)
 		exit(1);
-	if (!modules_sym->prop) {
-		struct property *prop;
-
-		prop = prop_alloc(P_DEFAULT, modules_sym);
-		prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
-	}
+	if (!modules_sym)
+		modules_sym = sym_find("n");
 
 	rootmenu.prompt->text = _(rootmenu.prompt->text);
 	rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
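
With the eager creation removed in both hunks above, modules_sym stays NULL
while the Kconfig files are parsed and is only bound when an "option modules"
declaration names a symbol; the post-parse fallback to the predefined constant
"n" makes module support read as disabled instead of leaving a NULL pointer
behind. A hedged sketch of a consumer written against that invariant;
modules_enabled() is a hypothetical helper, while modules_sym,
sym_get_tristate_value() and the tristate value no are existing kconfig names
(declared via lkc.h):

    #include "lkc.h"    /* assumes placement inside scripts/kconfig */

    /* Hypothetical consumer: once modules_sym == sym_find("n"), its
       tristate value is "no", so this simply reports modules disabled.  */
    static int modules_enabled(void)
    {
        return modules_sym && sym_get_tristate_value(modules_sym) != no;
    }
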